// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
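 *
 * In terms of API calls, the sequence is roughly the following (a
 * sketch of kvm_vgic_v4_set_forwarding(), see the real thing below):
 *
 *	its = vgic_get_its(kvm, irq_entry);
 *	vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
 *	its_map_vlpi(host_irq, &map);	// the LPI is now a VLPI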
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW
 * (see the sketch below):
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
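 *
 * As an illustration, the INT and CLEAR hooks boil down to something
 * like this (a simplified sketch, not the exact vITS handler code):
 *
 *	if (irq->hw)	// this LPI has been upgraded to a VLPI
 *		irq_set_irqchip_state(irq->host_irq,
 *				      IRQCHIP_STATE_PENDING,
 *				      true);	// false for CLEAR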
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu. This is sketched right below.
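 *
 * In pseudo-code, the residency dance looks roughly like this (a
 * sketch of vgic_v4_load()/vgic_v4_put(), assuming preemption is
 * already disabled):
 *
 *	irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
 *	its_make_vpe_resident(vpe, ...);
 *	...	// run the vcpu
 *	its_make_vpe_non_resident(vpe, need_db);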
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
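 *
 * On GICv4.0, where the doorbell is managed in SW, this amounts to
 * (again, a sketch rather than the literal code):
 *
 *	vcpu blocks:	enable_irq(vpe->irq);
 *	vcpu unblocks:	disable_irq(vpe->irq);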
 */

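/*
 * Doorbell IRQ setup: don't enable on request_irq() (IRQ_NOAUTOEN),
 * disable for real instead of lazily (IRQ_DISABLE_UNLAZY), and keep
 * irqbalance away from it (IRQ_NO_BALANCING).
 */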
#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Ensure we only update pending_last
	 * *after* the non-residency sequence has completed.
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

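/* Mirror a vSGI's vgic_irq configuration into the vPE's HW SGI state */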
static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled = irq->enabled;
	vpe->sgi_config[irq->intid].group = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to a host
	 * IRQ. The SGI code will do its magic.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

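/* Revert vSGIs to SW emulation, transferring any pending state back */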
static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with the kvm lock held */
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm: Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. This relies on kvm->lock to be
 * held. In both cases, the number of vcpus should now be
 * fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, nr_vcpus, ret;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too
		 * early...
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown...
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 *
 * Relies on kvm->lock to be held.
 */
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

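/*
 * Make the vPE non-resident. If need_db is set, ask for a doorbell so
 * that a VLPI firing while the vcpu is blocked can wake it up.
 */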
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}

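/* Make the vPE resident on the current physical CPU */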
int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the VPE is resident, let's get rid of a potential
	 * doorbell interrupt that would still be pending. This is a
	 * GICv4.0 only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}

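/* Turn a kernel routing entry into a kvm_msi and find the matching vITS */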
static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo	= irq_entry->msi.address_lo,
		.address_hi	= irq_entry->msi.address_hi,
		.data		= irq_entry->msi.data,
		.flags		= irq_entry->msi.flags,
		.devid		= irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= ((irq->priority & 0xfc) |
				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
				   LPI_PROP_GROUP1),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw		= true;
	irq->host_irq	= virq;
	atomic_inc(&map.vpe->vlpi_count);

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}