// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "../irq_remapping.h"

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu irq_2_iommu;
	struct irte irte_entry;
	union {
		struct msi_msg msi_entry;
	};
};

#define IR_X2APIC_MODE(mode)	((mode) ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? (dest) : ((dest) << 8))

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *			->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in a single-threaded environment with interrupts disabled, so there is no
 * need to take the dmar_global_lock.
 */
DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

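/*
 * Allocate a naturally aligned block of @count IRTEs from @iommu's remapping
 * table and record the allocation in @irq_iommu. Returns the index of the
 * first entry on success, -1 on failure. The irq_2_ir_lock is taken
 * internally, so callers need not hold it.
 */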
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle mask value %Lx\n",
		       mask, ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

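/*
 * Queue a selective Interrupt Entry Cache invalidation for the IRTEs at
 * @index (2^@mask entries) and wait synchronously for it to complete.
 */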
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		| QI_IEC_SELECTIVE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	return qi_submit_sync(iommu, &desc, 1, 0);
}

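/*
 * Replace the IRTE referenced by @irq_iommu with @irte_modified and
 * invalidate the interrupt entry cache for it. Posted-format entries are
 * updated atomically with cmpxchg16b where the architecture supports it.
 */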
static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else
#endif
	{
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

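/*
 * Lookup helpers: map an HPET block, IO-APIC or PCI device to the IOMMU
 * (or the irqdomain) that remaps its interrupts.
 */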
static struct irq_domain *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu->ir_domain;
	}
	return NULL;
}

static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	}
	return NULL;
}

static struct irq_domain *map_ioapic_to_ir(int apic)
{
	struct intel_iommu *iommu = map_ioapic_to_iommu(apic);

	return iommu ? iommu->ir_domain : NULL;
}

static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);

	return drhd ? drhd->iommu->ir_msi_domain : NULL;
}

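/*
 * Release the block of IRTEs referenced by @irq_iommu: clear the entries,
 * return them to the allocation bitmap and invalidate the entry cache.
 * Only the owner of the block (sub_handle == 0) performs the teardown.
 */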
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * Set the SVT, SQ and SID fields of an IRTE to verify the source ids of
 * interrupt requests.
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

/*
 * Set an IRTE to match only the bus number. Interrupt requests that reference
 * this IRTE must have a requester-id whose bus number is between start_bus
 * and end_bus, inclusive.
 */
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

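/*
 * Context accumulated by set_msi_sid_cb() across pci_for_each_dma_alias()
 * while deciding which requester-id an IRTE should verify for MSIs from a
 * device.
 */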
struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
	int count;
	int busmatch_count;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
		data->busmatch_count++;

	data->pdev = pdev;
	data->alias = alias;
	data->count++;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	data.count = 0;
	data.busmatch_count = 0;
	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias. The only case
	 * where it will return an alias on a different bus than the device
	 * is the case of a PCIe-to-PCI bridge, where the alias is for the
	 * subordinate bus. In this case we can only verify the bus.
	 *
	 * If there are multiple aliases, all with the same bus number,
	 * then all we can do is verify the bus. This is typical of NTB
	 * hardware, which uses proxy IDs: the device generates traffic
	 * from multiple devfn numbers on the same bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
				    dev->bus->number);
	else if (data.count >= 2 && data.busmatch_count == data.count)
		set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     pci_dev_id(dev));

	return 0;
}

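/*
 * In a kdump kernel the interrupt remapping table of the previous kernel
 * may still be live. Copy its contents into our table and mark the entries
 * that are present as allocated, so that interrupts from devices still
 * configured by the old kernel continue to be remapped correctly.
 */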
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	    != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}


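/*
 * Point the hardware at our remapping table: program DMAR_IRTA_REG with the
 * table address, the x2APIC (EIM) mode bit and the table size, latch it via
 * the SIRTP command, then globally invalidate the interrupt entry cache.
 */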
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	qi_global_iec(iommu);
}

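/*
 * Set the IRE bit in the global command register and wait for hardware to
 * confirm, then make sure compatibility-format (non-remapped) MSIs stay
 * blocked so they cannot be used for interrupt injection.
 */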
static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/* Block compatibility-format MSIs */
	if (sts & DMA_GSTS_CFIS) {
		iommu->gcmd &= ~DMA_GCMD_CFI;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, !(sts & DMA_GSTS_CFIS), sts);
	}

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, "Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

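/*
 * Allocate the per-IOMMU remapping table, its allocation bitmap and the
 * associated irqdomains, enable queued invalidation if necessary, and
 * either inherit or tear down a table left enabled by the previous kernel.
 */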
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_fwnode;
	}
	iommu->ir_msi_domain =
		arch_create_remap_msi_irq_domain(iommu->ir_domain,
						 "INTEL-IR-MSI",
						 iommu->seq_id);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If queued invalidation is already initialized, we must not
	 * disable and re-enable it here.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_ir_domain;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_ir_domain:
	if (iommu->ir_msi_domain)
		irq_domain_remove(iommu->ir_msi_domain);
	iommu->ir_msi_domain = NULL;
	irq_domain_remove(iommu->ir_domain);
	iommu->ir_domain = NULL;
out_free_fwnode:
	irq_domain_free_fwnode(fn);
out_free_bitmap:
	bitmap_free(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

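/*
 * Undo intel_setup_irq_remapping(): remove the irqdomains and their fwnodes,
 * then free the remapping table, its bitmap and the ir_table descriptor.
 */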
static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	struct fwnode_handle *fn;

	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			fn = iommu->ir_msi_domain->fwnode;

			irq_domain_remove(iommu->ir_msi_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			fn = iommu->ir_domain->fwnode;

			irq_domain_remove(iommu->ir_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		bitmap_free(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

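/*
 * Returns non-zero when the BIOS has set the x2APIC opt-out flag in the
 * DMAR table and the user has not overridden it on the command line.
 */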
static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar = (struct acpi_table_dmar *)dmar_tbl;

	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

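/*
 * Early (pre-enable) checks and allocations: parse the DMAR table, verify
 * that every IOMMU supports interrupt remapping, pick xAPIC vs. x2APIC (EIM)
 * mode, and set up the remapping tables. Runs single-threaded with
 * interrupts disabled; see the lock-ordering note above.
 */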
static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If an IRTE is in posted format, the 'pda' field spans the
		 * 64-bit boundary, so we need to use cmpxchg16b to update it
		 * atomically. Hence we only expose posted-interrupt support
		 * when X86_FEATURE_CX16 is present. In practice, hardware
		 * platforms that support posted interrupts also support
		 * X86_FEATURE_CX16.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
					~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static int __init intel_enable_irq_remapping(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct dmar_drhd_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct intel_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) bool setup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
	/*
	 * Set up interrupt remapping for all the DRHDs now.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) for_each_iommu(iommu, drhd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!ir_pre_enabled(iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) iommu_enable_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) setup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (!setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) irq_remapping_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) set_irq_posting_cap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) intel_cleanup_irq_remapping();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
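/*
 * Parse one HPET device scope entry: walk the ACPI PCI path through any
 * PCI-to-PCI bridges to resolve the device's final bus number, then record
 * the HPET block's (bus, devfn) and owning IOMMU in ir_hpet[].
 */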
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct intel_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct acpi_dmar_hardware_unit *drhd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct acpi_dmar_pci_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u8 bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int count, free = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) bus = scope->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) path = (struct acpi_dmar_pci_path *)(scope + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) count = (scope->length - sizeof(struct acpi_dmar_device_scope))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) / sizeof(struct acpi_dmar_pci_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) bus = read_pci_config_byte(bus, path->device, path->function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) PCI_SECONDARY_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) path++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) for (count = 0; count < MAX_HPET_TBS; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (ir_hpet[count].iommu == iommu &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ir_hpet[count].id == scope->enumeration_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) else if (ir_hpet[count].iommu == NULL && free == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) free = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (free == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pr_warn("Exceeded Max HPET blocks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ir_hpet[free].iommu = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ir_hpet[free].id = scope->enumeration_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ir_hpet[free].bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) pr_info("HPET id %d under DRHD base 0x%Lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) scope->enumeration_id, drhd->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
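/*
 * Parse one IOAPIC device scope entry: resolve the final bus number by
 * walking the bridge path, then record the IOAPIC's (bus, devfn) and
 * owning IOMMU in ir_ioapic[].
 */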
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct intel_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct acpi_dmar_hardware_unit *drhd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct acpi_dmar_pci_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u8 bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int count, free = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) bus = scope->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) path = (struct acpi_dmar_pci_path *)(scope + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) count = (scope->length - sizeof(struct acpi_dmar_device_scope))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) / sizeof(struct acpi_dmar_pci_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) while (--count > 0) {
		/*
		 * Access PCI config space directly because the PCI
		 * subsystem isn't initialized yet.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bus = read_pci_config_byte(bus, path->device, path->function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) PCI_SECONDARY_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) path++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) for (count = 0; count < MAX_IO_APICS; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (ir_ioapic[count].iommu == iommu &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ir_ioapic[count].id == scope->enumeration_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) else if (ir_ioapic[count].iommu == NULL && free == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) free = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (free == -1) {
		pr_warn("Exceeded Max IO APICs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ir_ioapic[free].bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ir_ioapic[free].iommu = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) ir_ioapic[free].id = scope->enumeration_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) scope->enumeration_id, drhd->address, iommu->seq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
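/*
 * Walk all device scope entries of one DRHD and register every IOAPIC and
 * HPET block found under it.
 */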
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct acpi_dmar_hardware_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct acpi_dmar_device_scope *scope;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) void *start, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) drhd = (struct acpi_dmar_hardware_unit *)header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) start = (void *)(drhd + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) end = ((void *)drhd) + header->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) while (start < end && ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) scope = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) start += scope->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) for (i = 0; i < MAX_HPET_TBS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (ir_hpet[i].iommu == iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ir_hpet[i].iommu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) for (i = 0; i < MAX_IO_APICS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ir_ioapic[i].iommu == iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ir_ioapic[i].iommu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
/*
 * Find the association between each IOAPIC and the interrupt-remapping
 * hardware unit (DRHD) that covers it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static int __init parse_ioapics_under_ir(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct dmar_drhd_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct intel_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bool ir_supported = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int ioapic_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for_each_iommu(iommu, drhd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!ecap_ir_support(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ir_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (!ir_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int ioapic_id = mpc_ioapic_id(ioapic_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!map_ioapic_to_iommu(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, interrupt remapping will be disabled\n",
			       ioapic_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static int __init ir_dev_scope_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (!irq_remapping_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) down_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ret = dmar_dev_scope_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) up_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) rootfs_initcall(ir_dev_scope_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static void disable_irq_remapping(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct dmar_drhd_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct intel_iommu *iommu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
	/*
	 * Disable interrupt remapping for all the DRHDs now.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) for_each_iommu(iommu, drhd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!ecap_ir_support(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) iommu_disable_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * Clear Posted-Interrupts capability.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (!disable_irq_post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
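/*
 * Re-enable interrupt remapping (e.g. on resume): restart queued
 * invalidation where it was in use, then reprogram and re-enable remapping
 * on every capable DRHD.
 */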
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int reenable_irq_remapping(int eim)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct dmar_drhd_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) bool setup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct intel_iommu *iommu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) for_each_iommu(iommu, drhd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (iommu->qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) dmar_reenable_qi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
	/*
	 * Set up interrupt remapping for all the DRHDs now.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) for_each_iommu(iommu, drhd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (!ecap_ir_support(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
		/* Set up interrupt remapping for this iommu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) iommu_set_irq_remapping(iommu, eim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) iommu_enable_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) setup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) set_irq_posting_cap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) error:
	/*
	 * TODO: handle the error condition gracefully here!
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Store the MSI remapping domain pointer in the device if enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * This is called from dmar_pci_bus_add_dev() so it works even when DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * remapping is disabled. Only update the pointer if the device is not
 * already handled by a non-default PCI/MSI interrupt domain. This protects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * e.g. VMD devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!irq_remapping_enabled || pci_dev_has_special_msi_domain(info->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
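/*
 * Initialize a remapped-format IRTE targeting the given vector and
 * destination APIC id.
 */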
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) memset(irte, 0, sizeof(*irte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) irte->present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) irte->dst_mode = apic->irq_dest_mode;
	/*
	 * The trigger mode in the IRTE will always be edge; for the IO-APIC,
	 * the actual level or edge trigger will be set up in the IO-APIC
	 * RTE. This helps simplify level-triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining
	 * IO-APIC irq migration in the presence of interrupt remapping.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) irte->trigger_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) irte->dlvry_mode = apic->irq_delivery_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) irte->vector = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) irte->dest_id = IRTE_DEST(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) irte->redir_hint = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) switch (info->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return map_ioapic_to_ir(info->devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return map_hpet_to_ir(info->devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct irq_remap_ops intel_irq_remap_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) .prepare = intel_prepare_irq_remapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .enable = intel_enable_irq_remapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) .disable = disable_irq_remapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) .reenable = reenable_irq_remapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) .enable_faulting = enable_drhd_fault_handling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) .get_irq_domain = intel_get_irq_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct intel_ir_data *ir_data = irqd->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct irte *irte = &ir_data->irte_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct irq_cfg *cfg = irqd_cfg(irqd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * then flush the interrupt entry cache.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) irte->vector = cfg->vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) irte->dest_id = IRTE_DEST(cfg->dest_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) /* Update the hardware only if the interrupt is in remapped mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) modify_irte(&ir_data->irq_2_iommu, irte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
/*
 * Migrate the IO-APIC irq in the presence of interrupt remapping.
 *
 * For both level and edge triggered interrupts, irq migration is a simple
 * atomic update (of the vector and CPU destination) of the IRTE, followed
 * by a flush of the hardware cache.
 *
 * For level triggered interrupts, we eliminate the io-apic RTE modification
 * (with the updated vector information) by using a virtual vector (the
 * io-apic pin number). The real vector used to interrupt the CPU comes
 * from the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same
 * mechanism is used to migrate MSI irqs in the presence of interrupt
 * remapping.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) struct irq_data *parent = data->parent_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct irq_cfg *cfg = irqd_cfg(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ret = parent->chip->irq_set_affinity(parent, mask, force);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) intel_ir_reconfigure_irte(data, false);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, it's time to clean up the previous
	 * vector allocation.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) send_cleanup_vector(cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return IRQ_SET_MASK_OK_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct intel_ir_data *ir_data = irq_data->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *msg = ir_data->msi_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct intel_ir_data *ir_data = data->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct vcpu_data *vcpu_pi_info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* stop posting interrupts, back to remapping mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!vcpu_pi_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct irte irte_pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * We are not caching the posted interrupt entry. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * copy the data from the remapped entry and modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * the fields which are relevant for posted mode. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * cached remapped entry is used for switching back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * remapped mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) memset(&irte_pi, 0, sizeof(irte_pi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* Update the posted mode fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) irte_pi.p_pst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) irte_pi.p_urgent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) irte_pi.p_vector = vcpu_pi_info->vector;
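		/*
		 * The 64-bit descriptor address straddles the pda_l/pda_h
		 * IRTE fields: the shifts and masks split it so that pda_l
		 * carries the PDA_LOW_BIT low-order bits and pda_h the
		 * remaining high bits.
		 */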
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ~(-1UL << PDA_HIGH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) modify_irte(&ir_data->irq_2_iommu, &irte_pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static struct irq_chip intel_ir_chip = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) .name = "INTEL-IR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) .irq_ack = apic_ack_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .irq_set_affinity = intel_ir_set_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) .irq_compose_msi_msg = intel_ir_compose_msi_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct irq_cfg *irq_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct irq_alloc_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int index, int sub_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct IR_IO_APIC_route_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct irte *irte = &data->irte_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct msi_msg *msg = &data->msi_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) switch (info->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) case X86_IRQ_ALLOC_TYPE_IOAPIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* Set source-id of interrupt request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) set_ioapic_sid(irte, info->devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) info->devid, irte->present, irte->fpd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) irte->dst_mode, irte->redir_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) irte->trigger_mode, irte->dlvry_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) irte->avail, irte->vector, irte->dest_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) irte->sid, irte->sq, irte->svt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) entry = (struct IR_IO_APIC_route_entry *)info->ioapic.entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) info->ioapic.entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) memset(entry, 0, sizeof(*entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) entry->index2 = (index >> 15) & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) entry->zero = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) entry->format = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) entry->index = (index & 0x7fff);
		/*
		 * The IO-APIC RTE will be configured with the virtual vector.
		 * The irq handler will do the explicit EOI to the io-apic.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) entry->vector = info->ioapic.pin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) entry->mask = 0; /* enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) entry->trigger = info->ioapic.trigger;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) entry->polarity = info->ioapic.polarity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (info->ioapic.trigger)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) entry->mask = 1; /* Mask level triggered irqs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) case X86_IRQ_ALLOC_TYPE_HPET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) case X86_IRQ_ALLOC_TYPE_PCI_MSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) set_hpet_sid(irte, info->devid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) set_msi_sid(irte, msi_desc_to_pci_dev(info->desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) msg->address_hi = MSI_ADDR_BASE_HI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) msg->data = sub_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) MSI_ADDR_IR_SHV |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) MSI_ADDR_IR_INDEX1(index) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) MSI_ADDR_IR_INDEX2(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
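/*
 * Release the per-irq chip data: clear the IRTE entries under
 * irq_2_ir_lock and free the intel_ir_data attached to each irq in the
 * range.
 */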
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void intel_free_irq_resources(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) unsigned int virq, unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct irq_data *irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct intel_ir_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct irq_2_iommu *irq_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) irq_data = irq_domain_get_irq_data(domain, virq + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (irq_data && irq_data->chip_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) data = irq_data->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) irq_iommu = &data->irq_2_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) clear_entries(irq_iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) irq_domain_reset_irq_data(irq_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
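/*
 * Allocate irqs in this remapping domain: one block of nr_irqs consecutive
 * IRTEs is reserved on the IOMMU, and each irq gets its own chip data
 * carrying the shared index plus its sub_handle offset into the block.
 */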
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) static int intel_irq_remapping_alloc(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) unsigned int virq, unsigned int nr_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct intel_iommu *iommu = domain->host_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct irq_alloc_info *info = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct intel_ir_data *data, *ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct irq_data *irq_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct irq_cfg *irq_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int i, ret, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!info || !iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
	/*
	 * With IRQ remapping enabled, contiguous CPU vectors are not needed
	 * to support multiple MSI interrupts.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) data = kzalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) goto out_free_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) down_read(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) up_read(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) pr_warn("Failed to allocate IRTE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) goto out_free_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irq_data ? irqd_cfg(irq_data) : NULL;
		if (!irq_data || !irq_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) goto out_free_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) ird = kzalloc(sizeof(*ird), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!ird)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) goto out_free_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /* Initialize the common data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) ird->irq_2_iommu = data->irq_2_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ird->irq_2_iommu.sub_handle = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) ird = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
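		/* Encode the IRTE index and the offset into the hwirq number. */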
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) irq_data->hwirq = (index << 16) + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) irq_data->chip_data = ird;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) irq_data->chip = &intel_ir_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) out_free_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) intel_free_irq_resources(domain, virq, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) out_free_parent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) irq_domain_free_irqs_common(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static void intel_irq_remapping_free(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) unsigned int virq, unsigned int nr_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) intel_free_irq_resources(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) irq_domain_free_irqs_common(domain, virq, nr_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static int intel_irq_remapping_activate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct irq_data *irq_data, bool reserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) intel_ir_reconfigure_irte(irq_data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
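/*
 * Deactivation clears the hardware IRTE by writing an all-zero entry
 * (present bit clear); the cached remapped entry in chip_data is kept so
 * that a later activation can restore it.
 */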
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static void intel_irq_remapping_deactivate(struct irq_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct irq_data *irq_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct intel_ir_data *data = irq_data->chip_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct irte entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) memset(&entry, 0, sizeof(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) modify_irte(&data->irq_2_iommu, &entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static const struct irq_domain_ops intel_ir_domain_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .alloc = intel_irq_remapping_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .free = intel_irq_remapping_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .activate = intel_irq_remapping_activate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .deactivate = intel_irq_remapping_deactivate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Support of Interrupt Remapping Unit Hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int eim = x2apic_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) if (eim && !ecap_eim_support(iommu->ecap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) iommu->reg_phys, iommu->ecap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) iommu->reg_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /* TODO: check all IOAPICs are covered by IOMMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /* Setup Interrupt-remapping now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ret = intel_setup_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) pr_err("Failed to setup irq remapping for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) iommu->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) intel_teardown_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ir_remove_ioapic_hpet_scope(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) iommu_enable_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
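/*
 * Hot add or remove one interrupt remapping unit. Removal is refused with
 * -EBUSY while any IRTE in the unit's table is still in use.
 */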
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) struct intel_iommu *iommu = dmaru->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (!irq_remapping_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (iommu == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (!ecap_ir_support(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (irq_remapping_cap(IRQ_POSTING_CAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) !cap_pi_support(iommu->cap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (insert) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (!iommu->ir_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ret = dmar_ir_add(dmaru, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (iommu->ir_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!bitmap_empty(iommu->ir_table->bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) INTR_REMAP_TABLE_ENTRIES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) iommu_disable_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) intel_teardown_irq_remapping(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ir_remove_ioapic_hpet_scope(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }