Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *         kvm->lpi_list_lock		must be taken with IRQs disabled
 *           vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
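
To make the drop-and-re-acquire rule concrete, here is a minimal userspace
sketch of the discipline (plain C with pthreads standing in for the kernel
locks; an illustration only, not part of this file):

#include <pthread.h>

static pthread_mutex_t upper = PTHREAD_MUTEX_INITIALIZER;	/* e.g. its_lock */
static pthread_mutex_t lower = PTHREAD_MUTEX_INITIALIZER;	/* e.g. irq_lock */

static void upgrade_to_upper(void)
{
	/*
	 * We hold "lower" but now also need "upper": never take it
	 * directly, as that would invert the documented order.
	 */
	pthread_mutex_unlock(&lower);	/* drop the lower-ranking lock */
	pthread_mutex_lock(&upper);	/* take the upper one first... */
	pthread_mutex_lock(&lower);	/* ...then re-acquire the lower one */

	/* Anything observed before the drop must now be re-validated. */
}

vgic_queue_irq_unlock() below is the in-tree instance of this pattern: it
drops the irq_lock, takes the ap_list_lock, retakes the irq_lock and only
then re-checks its earlier decision.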

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
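
The three branches above follow the GIC INTID map: SGIs occupy 0-15 and PPIs
16-31 (together the 32 private, per-vCPU interrupts), SPIs start at 32, and
LPIs start at 8192 (VGIC_MIN_LPI). A stand-alone restatement of the same
classification (illustrative userspace C, with the architectural constants
written out):

#include <stdio.h>

enum intid_kind { KIND_SGI, KIND_PPI, KIND_SPI, KIND_LPI, KIND_RESERVED };

static enum intid_kind classify_intid(unsigned int intid, unsigned int nr_spis)
{
	if (intid < 16)
		return KIND_SGI;		/* private, per vCPU */
	if (intid < 32)
		return KIND_PPI;		/* private, per vCPU */
	if (intid < 32 + nr_spis)
		return KIND_SPI;		/* shared, per VM */
	if (intid >= 8192)
		return KIND_LPI;		/* ITS-routed, per VM */
	return KIND_RESERVED;			/* hits the WARN() above */
}

int main(void)
{
	printf("%d %d\n", classify_intid(27, 64), classify_intid(8193, 64));
	return 0;	/* prints "1 3": a PPI and an LPI */
}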

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

/*
 * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!kref_put(&irq->refcount, vgic_irq_release))
		return;

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;

	kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	__vgic_put_lpi_locked(kvm, irq);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
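
The empty release callback works because kref_put() returns true only for the
final reference; the caller then performs the unlink and kfree() itself while
lpi_list_lock is held. The sketch below restates that pattern with C11
atomics and a singly linked list (userspace illustration, not the kernel
kref API):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* stands in for irq->refcount */
	struct obj *next;		/* stands in for the lpi_list linkage */
};

/* Caller must hold the list lock, like lpi_list_lock above. */
static void obj_put_locked(struct obj **list, struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) != 1)
		return;			/* not the last reference */

	/* Last reference: unlink and free while the lock is held. */
	for (struct obj **p = list; *p; p = &(*p)->next) {
		if (*p == o) {
			*p = o->next;
			break;
		}
	}
	free(o);
}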

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			vgic_put_irq(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->get_input_level)
		return irq->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
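
Read as a pure function, the oracle has exactly three cases; this stand-alone
restatement (illustrative, with vCPU indices in place of pointers and -1 for
"none") makes them explicit:

#include <stdbool.h>

struct irq_snapshot {
	bool active, enabled, pending;
	int  current_vcpu;		/* -1 if not on any ap_list */
	int  target_vcpu;		/* -1 if no configured target */
	bool dist_enabled;		/* distributor enable bit */
};

static int oracle(const struct irq_snapshot *s)
{
	/* 1: an active IRQ must stay where it already is. */
	if (s->active)
		return s->current_vcpu >= 0 ? s->current_vcpu : s->target_vcpu;

	/*
	 * 2: pending and enabled goes to the configured target, unless
	 * the distributor is disabled.
	 */
	if (s->enabled && s->pending) {
		if (s->target_vcpu >= 0 && !s->dist_enabled)
			return -1;
		return s->target_vcpu;
	}

	/* 3: nothing to deliver anywhere. */
	return -1;
}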

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
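
Since the comparator follows the usual negative/zero/positive contract, the
same ordering can be demonstrated with plain qsort() (illustration only; the
in-tree version additionally takes both irq_locks, as above):

#include <stdlib.h>
#include <stdbool.h>

struct virq_key {
	bool active;
	bool pend_en;			/* pending && enabled */
	unsigned char priority;		/* lower value = more urgent */
};

static int virq_cmp(const void *pa, const void *pb)
{
	const struct virq_key *a = pa, *b = pb;

	if (a->active || b->active)		/* active first */
		return (int)b->active - (int)a->active;
	if (!a->pend_en || !b->pend_en)		/* then pending+enabled */
		return (int)b->pend_en - (int)a->pend_en;
	return a->priority - b->priority;	/* then ascending priority */
}

/* usage: qsort(keys, n, sizeof(*keys), virq_cmp); */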

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
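
In other words: on a level-triggered line only a change of level is an event
(rising or falling), while on an edge-triggered line only level == true (a
rising edge) latches anything. The same rule as a stand-alone predicate
(illustrative):

#include <stdbool.h>

enum cfg { CFG_LEVEL, CFG_EDGE };

static bool injection_is_valid(enum cfg config, bool line_level, bool level)
{
	switch (config) {
	case CFG_LEVEL:
		return line_level != level;	/* level actually changed */
	case CFG_EDGE:
		return level;			/* rising edge only */
	}
	return false;
}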

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}
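
The retry dance above is an instance of a generic pattern: decide under the
inner lock, drop it to honour the lock order, take outer then inner, and
re-run the decision before committing. A userspace sketch with pthreads
(illustrative; decide_target() and commit() are placeholder stubs, not kernel
APIs, and the in-tree version is subtler because the outer lock itself
depends on the decision):

#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* ap_list_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* irq_lock */

static int decide_target(void) { return 0; }		/* placeholder oracle */
static void commit(int target) { (void)target; }	/* placeholder insertion */

static void queue_with_retry(void)
{
	int target;

	pthread_mutex_lock(&inner);
	for (;;) {
		target = decide_target();

		/* Lock order is outer -> inner, so drop inner first. */
		pthread_mutex_unlock(&inner);
		pthread_mutex_lock(&outer);
		pthread_mutex_lock(&inner);

		/* Did anything change while inner was dropped? */
		if (decide_target() == target)
			break;

		/* Yes: drop both and start over. */
		pthread_mutex_unlock(&inner);
		pthread_mutex_unlock(&outer);
		pthread_mutex_lock(&inner);
	}

	commit(target);			/* both locks held, decision valid */
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}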

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive: true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
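
As a usage sketch: an in-kernel device model (the arch timer is the classic
example) raises and lowers its level-sensitive PPI through this entry point.
Hypothetical helper, assuming the owner cookie was registered beforehand via
kvm_vgic_set_owner():

/* Hypothetical caller, not part of this file. */
static void example_set_timer_line(struct kvm_vcpu *vcpu, unsigned int ppi,
				   bool level, void *owner_cookie)
{
	WARN_ON(kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ppi,
				    level, owner_cookie));
}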

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    bool (*get_input_level)(int vintid))
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->get_input_level = get_input_level;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, bool (*get_input_level)(int vintid))
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.  Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
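
Putting the two entry points together, a hypothetical in-kernel device would
first claim its PPI and then inject with the same cookie, so that
vgic_validate_injection() accepts the call (illustrative, not from this
file):

static int example_claim_and_raise_ppi(struct kvm_vcpu *vcpu,
				       unsigned int ppi, void *cookie)
{
	int ret = kvm_vgic_set_owner(vcpu, ppi, cookie);

	if (ret)
		return ret;		/* PPI already owned by someone else */

	return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, ppi,
				   true, cookie);
}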

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				      SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		vgic_v2_fold_lr_state(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		vgic_v3_fold_lr_state(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) /* Requires the irq_lock to be held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 				    struct vgic_irq *irq, int lr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	lockdep_assert_held(&irq->irq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		vgic_v2_populate_lr(vcpu, irq, lr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		vgic_v3_populate_lr(vcpu, irq, lr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		vgic_v2_clear_lr(vcpu, lr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		vgic_v3_clear_lr(vcpu, lr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (kvm_vgic_global_state.type == VGIC_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		vgic_v2_set_underflow(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		vgic_v3_set_underflow(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		w = vgic_irq_get_lr_count(irq);
		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

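/*
 * Fill the list registers from the ap_list: if more interrupts are
 * queued than there are LRs (or multi-source SGIs must be ordered),
 * sort the list by priority first, then populate as many LRs as
 * possible and arm the underflow interrupt for whatever didn't fit.
 */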
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;
	int i = 0;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			raw_spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

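			/*
			 * A multi-source GICv2 SGI: record its priority so
			 * that the early exit above holds back lower-priority
			 * interrupts until all of its sources are delivered.
			 */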
			if (irq->source)
				prio = irq->priority;
		}

		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	/* Nuke remaining LRs */
	for (i = count; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_clear_lr(vcpu, i);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
	else
		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

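/*
 * Copy the hardware CPU-interface state (notably the list registers)
 * into the shadow struct before we inspect it.
 */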
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	int used_lrs;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
	else
		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

	if (used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

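/*
 * Write the shadow CPU-interface state (notably the list registers)
 * back into the hardware.
 */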
static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts into the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing an additional synchronization mechanism doesn't
	 * change this.
	 *
	 * Note that we still need to go through the whole thing if anything
	 * can be directly injected (GICv4).
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_msis(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);

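	/*
	 * Give GICv4 a chance to commit any directly-injected interrupt
	 * state before we enter the guest.
	 */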
	if (vgic_supports_direct_msis(vcpu->kvm))
		vgic_v4_commit(vcpu);
}

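/*
 * Called when the VCPU is scheduled in: program the saved GIC
 * CPU-interface state back into the hardware.
 */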
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

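/*
 * Called when the VCPU is scheduled out: read the live GIC
 * CPU-interface state back from the hardware.
 */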
void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

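/*
 * Resync the software copy of the VMCR (priority mask, group enables,
 * binary points) from the hardware so that subsequent pending-interrupt
 * checks see the guest's latest settings.
 */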
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_vmcr_sync(vcpu);
	else
		vgic_v3_vmcr_sync(vcpu);
}

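/*
 * Return whether @vcpu has an interrupt it could take: either a GICv4
 * vPE with pending state left behind (pending_last), or any enabled,
 * pending, non-active interrupt on the ap_list whose priority beats
 * the guest's priority mask.
 */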
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;
	struct vgic_vmcr vmcr;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	vgic_get_vmcr(vcpu, &vmcr);

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

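/*
 * Return whether the virtual interrupt @vintid on @vcpu is backed by a
 * HW interrupt and is currently active.
 */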
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}