// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

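/* Handler for registers that are Read-As-Zero. */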
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

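/* Handler for registers that are Read-As-One (all bits set). */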
unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

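/*
 * Propagate the current priority and group of a HW-backed vSGI to the
 * GICv4.1 layer backing it.
 */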
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

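/*
 * Userspace variant of ISENABLER: only the virtual enable state is
 * updated here, without poking the underlying physical interrupt.
 */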
int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			val = irq_is_pending(irq);
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

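/*
 * GICv2 SGIs carry a per-source pending state (irq->source), so some of
 * the handlers below need to treat them specially.
 */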
static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

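/* IPRIORITYR registers hold one byte of priority per interrupt. */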
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

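/*
 * ICFGR registers use two bits per interrupt; a set upper bit means the
 * interrupt is edge-triggered.
 */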
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general;
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

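/*
 * Return a 32-bit bitmap of the line level of the interrupts starting at
 * intid; only level-triggered interrupts can report a set bit.
 */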
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the irq type (level
		 * or edge), so that restoring the line level does not depend
		 * on the VM having restored the irq configuration first.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

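/* bsearch() comparator matching an MMIO offset against a register region. */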
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static int match_region(const void *key, const void *elt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) const unsigned int offset = (unsigned long)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) const struct vgic_register_region *region = elt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (offset < region->reg_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (offset >= region->reg_offset + region->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) const struct vgic_register_region *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) vgic_find_mmio_region(const struct vgic_register_region *regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int nr_regions, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) sizeof(regions[0]), match_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
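/* Forward VMCR accesses to the GICv2 or GICv3 backend, as appropriate. */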
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) vgic_v2_set_vmcr(vcpu, vmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) vgic_v3_set_vmcr(vcpu, vmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) vgic_v2_get_vmcr(vcpu, vmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) vgic_v3_get_vmcr(vcpu, vmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * kvm_mmio_read_buf() returns a value in a format where it can be converted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * to a byte array and be directly observed as the guest wanted it to appear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * in memory if it had done the store itself, which is LE for the GIC, as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * guest knows the GIC is always LE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * We convert this value to the CPU's native format to deal with it as a data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned long data = kvm_mmio_read_buf(val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) switch (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return le16_to_cpu(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return le32_to_cpu(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return le64_to_cpu(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * kvm_mmio_write_buf() expects a value in a format such that if converted to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * a byte array it is observed as the guest would see it if it could perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * the load directly. Since the GIC is LE, and the guest knows this, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * guest expects a value in little endian format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * We convert the data value from the CPU's native format to LE so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * value is returned in the proper format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) switch (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) data = cpu_to_le16(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) data = cpu_to_le32(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) data = cpu_to_le64(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kvm_mmio_write_buf(buf, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
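/* Retrieve the vgic_io_device embedding this generic kvm_io_device. */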
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return container_of(dev, struct vgic_io_device, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
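/*
 * Sanity-check a trapped access: the width must be one the region accepts
 * (8, 32 or 64 bit), the address must be naturally aligned, and for
 * per-IRQ registers the access must not reference an interrupt beyond the
 * number allocated to the guest.
 */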
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static bool check_region(const struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) const struct vgic_register_region *region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) gpa_t addr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) switch (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) case sizeof(u8):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) flags = VGIC_ACCESS_8bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) case sizeof(u32):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) flags = VGIC_ACCESS_32bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) case sizeof(u64):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) flags = VGIC_ACCESS_64bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (!region->bits_per_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Do we access a non-allocated IRQ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
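/*
 * Look up the register region backing an access to @addr and check that
 * the access is valid for it; returns NULL if no (valid) region matches.
 */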
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) const struct vgic_register_region *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) gpa_t addr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) addr - iodev->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!region || !check_region(vcpu->kvm, region, addr, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
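/*
 * Userspace accesses are always 32 bits wide. Reads from offsets that do
 * not map to a valid region simply return 0 rather than an error.
 */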
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) gpa_t addr, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct kvm_vcpu *r_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (region->uaccess_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *val = region->read(r_vcpu, addr, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
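/*
 * As above, but for writes: prefer the dedicated uaccess handler when the
 * region provides one, otherwise fall back to the normal MMIO handler.
 */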
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) gpa_t addr, const u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct kvm_vcpu *r_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (region->uaccess_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) region->write(r_vcpu, addr, sizeof(u32), *val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * Userland access to VGIC registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) bool is_write, int offset, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
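/*
 * kvm_io_bus read callback for all VGIC MMIO devices: dispatch to the
 * matching region's handler. Accesses that hit no valid region are RAZ.
 */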
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) gpa_t addr, int len, void *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) unsigned long data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) region = vgic_get_mmio_region(vcpu, iodev, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) memset(val, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) switch (iodev->iodev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) case IODEV_CPUIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) data = region->read(vcpu, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) case IODEV_DIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) data = region->read(vcpu, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case IODEV_REDIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) data = region->read(iodev->redist_vcpu, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) case IODEV_ITS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) data = region->its_read(vcpu->kvm, iodev->its, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) vgic_data_host_to_mmio_bus(val, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
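/*
 * kvm_io_bus write callback: dispatch to the matching region's handler.
 * Writes that hit no valid region are silently ignored (WI).
 */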
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) gpa_t addr, int len, const void *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned long data = vgic_data_mmio_bus_to_host(val, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) region = vgic_get_mmio_region(vcpu, iodev, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) switch (iodev->iodev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) case IODEV_CPUIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) region->write(vcpu, addr, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case IODEV_DIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) region->write(vcpu, addr, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) case IODEV_REDIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) region->write(iodev->redist_vcpu, addr, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) case IODEV_ITS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) region->its_write(vcpu->kvm, iodev->its, addr, len, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct kvm_io_device_ops kvm_io_gic_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) .read = dispatch_mmio_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) .write = dispatch_mmio_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
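/*
 * Register the distributor's MMIO region on the KVM MMIO bus. The set of
 * register regions and the window length depend on the emulated GIC model.
 */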
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) enum vgic_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) case VGIC_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) len = vgic_v2_init_dist_iodev(io_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) case VGIC_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) len = vgic_v3_init_dist_iodev(io_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) io_device->base_addr = dist_base_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) io_device->iodev_type = IODEV_DIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) io_device->redist_vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) len, &io_device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }