// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/rculist.h>

#include <trace/events/kvm.h>

#include <asm/msidef.h>

#include "irq.h"

#include "ioapic.h"

#include "lapic.h"

#include "hyperv.h"
#include "x86.h"

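/*
 * ->set callbacks bound by kvm_set_routing_entry() for
 * KVM_IRQ_ROUTING_IRQCHIP entries: inject a line interrupt into the
 * in-kernel PIC or IOAPIC model on the pin recorded in the routing entry.
 */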
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        struct kvm_pic *pic = kvm->arch.vpic;
        return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
        return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
                                  line_status);
}

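/*
 * Slow-path APIC interrupt delivery: try the fast path first, then walk
 * every vCPU and inject into each matching destination.  For
 * lowest-priority delivery a single target is chosen, either by comparing
 * APIC priorities or, when vector hashing is enabled, by hashing the
 * vector over the set of matching vCPUs.  Returns -1 if no destination
 * APIC accepted the interrupt.
 */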
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                             struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
        int i, r = -1;
        struct kvm_vcpu *vcpu, *lowest = NULL;
        unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
        unsigned int dest_vcpus = 0;

        if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
                return r;

        if (irq->dest_mode == APIC_DEST_PHYSICAL &&
            irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
                printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
                irq->delivery_mode = APIC_DM_FIXED;
        }

        memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
                                         irq->dest_id, irq->dest_mode))
                        continue;

                if (!kvm_lowest_prio_delivery(irq)) {
                        if (r < 0)
                                r = 0;
                        r += kvm_apic_set_irq(vcpu, irq, dest_map);
                } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
                        if (!kvm_vector_hashing_enabled()) {
                                if (!lowest)
                                        lowest = vcpu;
                                else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
                                        lowest = vcpu;
                        } else {
                                __set_bit(i, dest_vcpu_bitmap);
                                dest_vcpus++;
                        }
                }
        }

        if (dest_vcpus != 0) {
                int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
                                              dest_vcpu_bitmap, KVM_MAX_VCPUS);

                lowest = kvm_get_vcpu(kvm, idx);
        }

        if (lowest)
                r = kvm_apic_set_irq(lowest, irq, dest_map);

        return r;
}

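/*
 * Decode an MSI routing entry's address/data pair into a struct
 * kvm_lapic_irq: destination ID, vector, destination and trigger mode,
 * delivery mode and redirection hint.  When the VM uses the x2APIC MSI
 * format (KVM_CAP_X2APIC_API), address_hi carries bits 31:8 of the
 * destination ID.
 */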
void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq)
{
        trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
                              (u64)e->msi.address_hi << 32 : 0),
                              e->msi.data);

        irq->dest_id = (e->msi.address_lo &
                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
        if (kvm->arch.x2apic_format)
                irq->dest_id |= MSI_ADDR_EXT_DEST_ID(e->msi.address_hi);
        irq->vector = (e->msi.data &
                       MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        irq->dest_mode = kvm_lapic_irq_dest_mode(
                        !!((1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo));
        irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
        irq->delivery_mode = e->msi.data & 0x700;
        irq->msi_redir_hint = ((e->msi.address_lo
                & MSI_ADDR_REDIRECTION_LOWPRI) > 0);
        irq->level = 1;
        irq->shorthand = APIC_DEST_NOSHORT;
}
EXPORT_SYMBOL_GPL(kvm_set_msi_irq);

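/*
 * In the x2APIC MSI format, bits 7:0 of address_hi are reserved and must
 * be zero; a routing entry that sets them is rejected as invalid.
 */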
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
                                         struct kvm_kernel_irq_routing_entry *e)
{
        return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
        struct kvm_lapic_irq irq;

        if (kvm_msi_route_invalid(kvm, e))
                return -EINVAL;

        if (!level)
                return -1;

        kvm_set_msi_irq(kvm, e, &irq);

        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

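/*
 * Hyper-V SynIC routes behave like edge-triggered interrupts: a falling
 * edge (!level) is ignored, a rising edge injects the synthetic interrupt
 * into the target vCPU's SINT.
 */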
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        if (!level)
                return -1;

        return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}

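/*
 * Inject an interrupt from a context that must not sleep (e.g. irqfd
 * injection).  Only route types that can be delivered without blocking
 * are handled here; anything else returns -EWOULDBLOCK so the caller
 * falls back to the schedulable path.
 */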
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_lapic_irq irq;
        int r;

        switch (e->type) {
        case KVM_IRQ_ROUTING_HV_SINT:
                return kvm_hv_set_sint(e, kvm, irq_source_id, level,
                                       line_status);

        case KVM_IRQ_ROUTING_MSI:
                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;

                kvm_set_msi_irq(kvm, e, &irq);

                if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
                        return r;
                break;

        default:
                break;
        }

        return -EWOULDBLOCK;
}

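/*
 * Allocate a per-VM IRQ source ID from the arch bitmap.  The userspace
 * and irqfd-resample IDs are pre-reserved at VM creation, so the first
 * zero bit found here is always a fresh ID; the bitmap limits a VM to
 * BITS_PER_LONG source IDs in total.
 */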
int kvm_request_irq_source_id(struct kvm *kvm)
{
        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
        int irq_source_id;

        mutex_lock(&kvm->irq_lock);
        irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

        if (irq_source_id >= BITS_PER_LONG) {
                printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
                irq_source_id = -EFAULT;
                goto unlock;
        }

        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
        set_bit(irq_source_id, bitmap);
unlock:
        mutex_unlock(&kvm->irq_lock);

        return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);

        mutex_lock(&kvm->irq_lock);
        if (irq_source_id < 0 ||
            irq_source_id >= BITS_PER_LONG) {
                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
                goto unlock;
        }
        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
        if (!irqchip_kernel(kvm))
                goto unlock;

        kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
        kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
unlock:
        mutex_unlock(&kvm->irq_lock);
}

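/*
 * IRQ mask notifiers fire when an irqchip pin is masked or unmasked.  The
 * notifier list is protected by irq_lock for writers and by irq_srcu for
 * readers; unregistration waits for an SRCU grace period before the
 * caller may free the notifier.
 */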
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        kimn->irq = irq;
        hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
{
        struct kvm_irq_mask_notifier *kimn;
        int idx, gsi;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
        return irqchip_in_kernel(kvm);
}

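/*
 * Translate one userspace kvm_irq_routing_entry into the in-kernel
 * representation, validating the chip/pin and binding the matching ->set
 * callback (PIC, IOAPIC, MSI or Hyper-V SINT).
 */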
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        /*
         * We can't check irqchip_in_kernel() here as some callers are
         * currently initializing the irqchip. Other callers should therefore
         * check kvm_arch_can_set_irq_routing() before calling this function.
         */
        switch (ue->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
                if (irqchip_split(kvm))
                        return -EINVAL;
                e->irqchip.pin = ue->u.irqchip.pin;
                switch (ue->u.irqchip.irqchip) {
                case KVM_IRQCHIP_PIC_SLAVE:
                        e->irqchip.pin += PIC_NUM_PINS / 2;
                        fallthrough;
                case KVM_IRQCHIP_PIC_MASTER:
                        if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
                                return -EINVAL;
                        e->set = kvm_set_pic_irq;
                        break;
                case KVM_IRQCHIP_IOAPIC:
                        if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
                                return -EINVAL;
                        e->set = kvm_set_ioapic_irq;
                        break;
                default:
                        return -EINVAL;
                }
                e->irqchip.irqchip = ue->u.irqchip.irqchip;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
                e->msi.address_lo = ue->u.msi.address_lo;
                e->msi.address_hi = ue->u.msi.address_hi;
                e->msi.data = ue->u.msi.data;

                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;
                break;
        case KVM_IRQ_ROUTING_HV_SINT:
                e->set = kvm_hv_set_sint;
                e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
                e->hv_sint.sint = ue->u.hv_sint.sint;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

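/*
 * Return true and set *dest_vcpu if the interrupt targets exactly one
 * vCPU.  The fast path answers most cases from the APIC map; otherwise
 * every vCPU is checked and the result is true only when a single
 * destination matched.
 */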
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu)
{
        int i, r = 0;
        struct kvm_vcpu *vcpu;

        if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
                return true;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
                                         irq->dest_id, irq->dest_mode))
                        continue;

                if (++r == 2)
                        return false;

                *dest_vcpu = vcpu;
        }

        return r == 1;
}
EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);

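/*
 * Default identity GSI routing installed when the in-kernel irqchip is
 * created without a userspace-supplied routing table: GSIs 0-15 are wired
 * to both the PIC (master for 0-7, slave for 8-15) and the IOAPIC, GSIs
 * 16-23 to the IOAPIC only.
 */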
#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
          .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
          .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)

static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}

static const struct kvm_irq_routing_entry empty_routing[] = {};

int kvm_setup_empty_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
}

void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
        if (!irqchip_split(kvm))
                return;
        kvm_make_scan_ioapic_request(kvm);
}

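/*
 * With a split irqchip, scan the MSI routes installed on the reserved
 * "IOAPIC" GSIs and mark every level-triggered vector that targets this
 * vCPU in ioapic_handled_vectors, so that EOIs for those vectors are
 * relayed to the userspace IOAPIC.
 */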
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
                            ulong *ioapic_handled_vectors)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_kernel_irq_routing_entry *entry;
        struct kvm_irq_routing_table *table;
        u32 i, nr_ioapic_pins;
        int idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
                               kvm->arch.nr_reserved_ioapic_pins);
        for (i = 0; i < nr_ioapic_pins; ++i) {
                hlist_for_each_entry(entry, &table->map[i], link) {
                        struct kvm_lapic_irq irq;

                        if (entry->type != KVM_IRQ_ROUTING_MSI)
                                continue;

                        kvm_set_msi_irq(vcpu->kvm, entry, &irq);

                        if (irq.trig_mode &&
                            kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                                irq.dest_id, irq.dest_mode))
                                __set_bit(irq.vector, ioapic_handled_vectors);
                }
        }
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
        kvm_hv_irq_routing_update(kvm);
}