/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
#include <kvm/iodev.h>
#include <linux/list.h>
#include <linux/jump_label.h>

#include <linux/irqchip/arm-gic-v4.h>

#define VGIC_V3_MAX_CPUS	512
#define VGIC_V2_MAX_CPUS	8
#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
#define VGIC_MAX_PRIVATE	(VGIC_NR_PRIVATE_IRQS - 1)
#define VGIC_MAX_SPI		1019
#define VGIC_MAX_RESERVED	1023
#define VGIC_MIN_LPI		8192
#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)

#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
			 (irq) <= VGIC_MAX_SPI)
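
/*
 * Illustrative sketch only: how the constants above partition the
 * guest-visible INTID space. The helper name is hypothetical and the
 * block is compiled out; it is not part of the vgic API.
 */
#if 0
static inline const char *example_intid_class(u32 intid)
{
	if (intid < VGIC_NR_SGIS)
		return "SGI";		/* 0..15: software generated */
	if (irq_is_ppi(intid))
		return "PPI";		/* 16..31: private peripheral */
	if (irq_is_spi(intid))
		return "SPI";		/* 32..1019: shared peripheral */
	if (intid >= VGIC_MIN_LPI)
		return "LPI";		/* 8192..: message based, via the ITS */
	return "reserved";		/* 1020..8191: not a valid vgic INTID */
}
#endif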

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

/* Same for all guests, since it depends only on the _host's_ GIC model */
struct vgic_global {
	/* type of the host GIC */
	enum vgic_type		type;

	/* Physical address of vgic virtual cpu interface */
	phys_addr_t		vcpu_base;

	/* GICV mapping, kernel VA */
	void __iomem		*vcpu_base_va;
	/* GICV mapping, HYP VA */
	void __iomem		*vcpu_hyp_va;

	/* virtual control interface mapping, kernel VA */
	void __iomem		*vctrl_base;
	/* virtual control interface mapping, HYP VA */
	void __iomem		*vctrl_hyp;

	/* Number of implemented list registers */
	int			nr_lr;

	/* Maintenance IRQ number */
	unsigned int		maint_irq;

	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
	int			max_gic_vcpus;

	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool			can_emulate_gicv2;

	/* Hardware has GICv4? */
	bool			has_gicv4;
	bool			has_gicv4_1;

	/* GIC system register CPU interface */
	struct static_key_false gicv3_cpuif;

	u32			ich_vtr_el2;
};

extern struct vgic_global kvm_vgic_global_state;

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)

enum vgic_irq_config {
	VGIC_CONFIG_EDGE = 0,
	VGIC_CONFIG_LEVEL
};

struct vgic_irq {
	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	struct list_head lpi_list;	/* Used to link all LPIs together */
	struct list_head ap_list;

	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU this
					 * interrupt is private to.
					 * SPIs and LPIs: The VCPU whose ap_list
					 * this is queued on.
					 */

	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
					 * be sent to, as a result of the
					 * targets reg (v2) or the
					 * affinity reg (v3).
					 */

	u32 intid;			/* Guest visible INTID */
	bool line_level;		/* Level only */
	bool pending_latch;		/* The pending latch state used to calculate
					 * the pending state for both level
					 * and edge triggered IRQs. */
	bool active;			/* not used for LPIs */
	bool enabled;
	bool hw;			/* Tied to HW IRQ */
	struct kref refcount;		/* Used for LPIs */
	u32 hwintid;			/* HW INTID number */
	unsigned int host_irq;		/* linux irq corresponding to hwintid */
	union {
		u8 targets;		/* GICv2 target VCPUs mask */
		u32 mpidr;		/* GICv3 target VCPU */
	};
	u8 source;			/* GICv2 SGIs only */
	u8 active_source;		/* GICv2 SGIs only */
	u8 priority;
	u8 group;			/* 0 == group 0, 1 == group 1 */
	enum vgic_irq_config config;	/* Level or edge */

	/*
	 * Callback function pointer for in-kernel devices that can tell us
	 * the state of the input level of a mapped level-triggered IRQ faster
	 * than peeking into the physical GIC.
	 *
	 * Always called in a non-preemptible section, and the callback can use
	 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
	 * IRQs.
	 */
	bool (*get_input_level)(int vintid);

	void *owner;			/* Opaque pointer to reserve an interrupt
					   for in-kernel devices. */
};
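
/*
 * Illustrative sketch only: a hypothetical in-kernel device that forwards a
 * physical interrupt to a guest PPI and supplies a get_input_level callback,
 * so the vgic can sample the line state without peeking into the physical
 * GIC. example_device_line_asserted() and the helper names are made up for
 * the example; the block is compiled out.
 */
#if 0
static bool example_get_input_level(int vintid)
{
	/* Ask the emulated device whether its output line is asserted. */
	return example_device_line_asserted(vintid);
}

static int example_forward_hw_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
				  u32 vintid)
{
	/* Tie the host IRQ to the guest INTID and register the callback. */
	return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid,
				     example_get_input_level);
}
#endif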

struct vgic_register_region;
struct vgic_its;

enum iodev_type {
	IODEV_CPUIF,
	IODEV_DIST,
	IODEV_REDIST,
	IODEV_ITS
};

struct vgic_io_device {
	gpa_t base_addr;
	union {
		struct kvm_vcpu *redist_vcpu;
		struct vgic_its *its;
	};
	const struct vgic_register_region *regions;
	enum iodev_type iodev_type;
	int nr_regions;
	struct kvm_io_device dev;
};

struct vgic_its {
	/* The base address of the ITS control register frame */
	gpa_t			vgic_its_base;

	bool			enabled;
	struct vgic_io_device	iodev;
	struct kvm_device	*dev;

	/* These registers correspond to GITS_BASER{0,1} */
	u64			baser_device_table;
	u64			baser_coll_table;

	/* Protects the command queue */
	struct mutex		cmd_lock;
	u64			cbaser;
	u32			creadr;
	u32			cwriter;

	/* migration ABI revision in use */
	u32			abi_rev;

	/* Protects the device and collection lists */
	struct mutex		its_lock;
	struct list_head	device_list;
	struct list_head	collection_list;
};

struct vgic_state_iter;

struct vgic_redist_region {
	u32 index;
	gpa_t base;
	u32 count;	/* number of redistributors or 0 if single region */
	u32 free_index;	/* index of the next free redistributor */
	struct list_head list;
};

struct vgic_dist {
	bool			in_kernel;
	bool			ready;
	bool			initialized;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	/* Implementation revision as reported in the GICD_IIDR */
	u32			implementation_rev;

	/* Userspace can write to GICv2 IGROUPR */
	bool			v2_groups_user_writable;

	/* Do injected MSIs require an additional device ID? */
	bool			msis_require_devid;

	int			nr_spis;

	/* base addresses in guest physical address space: */
	gpa_t			vgic_dist_base;		/* distributor */
	union {
		/* either a GICv2 CPU interface */
		gpa_t			vgic_cpu_base;
		/* or a number of GICv3 redistributor regions */
		struct list_head	rd_regions;
	};

	/* distributor enabled */
	bool			enabled;

	/* Wants SGIs without active state */
	bool			nassgireq;

	struct vgic_irq		*spis;

	struct vgic_io_device	dist_iodev;

	bool			has_its;

	/*
	 * Contains the attributes and gpa of the LPI configuration table.
	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
	 * one address across all redistributors.
	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
	 */
	u64			propbaser;

	/* Protects the lpi_list and the count value below. */
	raw_spinlock_t		lpi_list_lock;
	struct list_head	lpi_list_head;
	int			lpi_list_count;

	/* LPI translation cache */
	struct list_head	lpi_translation_cache;

	/* used by vgic-debug */
	struct vgic_state_iter	*iter;

	/*
	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
	 * array, the property table pointer as well as allocation
	 * data. This essentially ties the Linux IRQ core and ITS
	 * together, and avoids leaking KVM's data structures anywhere
	 * else.
	 */
	struct its_vm		its_vm;
};

struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];

	unsigned int	used_lrs;
};

struct vgic_v3_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];

	/*
	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
	 * pending table pointer, the its_vm pointer and a few other
	 * HW specific things. As for the its_vm structure, this is
	 * linking the Linux IRQ subsystem and the ITS together.
	 */
	struct its_vpe	its_vpe;

	unsigned int	used_lrs;
};

struct vgic_cpu {
	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};

	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];

	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
	 * Active or Pending (hence the name: AP list), or because they
	 * recently were one of the two and need to be migrated off this list
	 * to another VCPU.
	 */
	struct list_head ap_list_head;

	/*
	 * Members below are used with GICv3 emulation only and represent
	 * parts of the redistributor.
	 */
	struct vgic_io_device	rd_iodev;
	struct vgic_redist_region *rdreg;

	/* Contains the attributes and gpa of the LPI pending tables. */
	u64 pendbaser;

	bool lpis_enabled;

	/* Cache guest priority bits */
	u32 num_pri_bits;

	/* Cache guest interrupt ID bits */
	u32 num_id_bits;
};

extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;

int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);

int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner);
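
/*
 * Illustrative sketch only: driving a level-triggered SPI from a
 * hypothetical in-kernel device. SPI INTIDs start at VGIC_NR_PRIVATE_IRQS;
 * the cpuid argument identifies the target VCPU for private interrupts,
 * while SPIs are routed from the distributor configuration. The helper name
 * is made up and the block is compiled out.
 */
#if 0
static int example_set_spi_level(struct kvm *kvm, unsigned int spi,
				 bool asserted, void *owner)
{
	unsigned int intid = VGIC_NR_PRIVATE_IRQS + spi;

	/* For a level-triggered SPI this models the state of the line. */
	return kvm_vgic_inject_irq(kvm, 0, intid, asserted, owner);
}
#endif
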
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid));
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);

void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	((k)->arch.vgic.initialized)
#define vgic_ready(k)		((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
				 ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))

bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);

void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum number of VCPUs a guest
 * can use.
 */
static inline int kvm_vgic_get_max_vcpus(void)
{
	return kvm_vgic_global_state.max_gic_vcpus;
}
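
/*
 * Illustrative sketch only: rejecting a VCPU count the host GIC cannot
 * support, e.g. more than 8 VCPUs on a GICv2 host. The helper name is made
 * up and the block is compiled out.
 */
#if 0
static int example_check_nr_vcpus(unsigned int nr_vcpus)
{
	if (nr_vcpus > kvm_vgic_get_max_vcpus())
		return -EINVAL;

	return 0;
}
#endif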

/**
 * kvm_vgic_setup_default_irq_routing:
 * Set up a default flat GSI routing table mapping all SPIs
 */
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);

int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
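
/*
 * Illustrative sketch only: an in-kernel device reserving a private
 * interrupt with an opaque owner token. Later calls to
 * kvm_vgic_inject_irq() for this INTID have to present the matching token.
 * The helper name is made up and the block is compiled out.
 */
#if 0
static int example_claim_and_raise_ppi(struct kvm_vcpu *vcpu,
					unsigned int intid, void *owner)
{
	int ret = kvm_vgic_set_owner(vcpu, intid, owner);

	if (ret)
		return ret;

	/* Inject with the same owner token that reserved the interrupt. */
	return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, intid, true,
				   owner);
}
#endif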

struct kvm_kernel_irq_routing_entry;

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
			       struct kvm_kernel_irq_routing_entry *irq_entry);

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
				 struct kvm_kernel_irq_routing_entry *irq_entry);

int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);

#endif /* __KVM_ARM_VGIC_H */