// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "../irq_remapping.h"

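/*
 * Dispatch table for walking DMAR remapping structures: cb[] is indexed
 * by the ACPI DMAR structure type and arg[] is handed to the matching
 * handler (see dmar_walk_remapping_entries() below).
 */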
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};

/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL at the tail, so that a scan of the list will
	 * find it at the very end.
	 */
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

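/*
 * Count the PCI endpoint, bridge and ACPI namespace entries in a device
 * scope list and allocate a matching array of struct dmar_dev_scope.
 * Returns NULL if the scope list contains no supported entries.
 */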
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

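/*
 * Build a dmar_pci_notify_info for a PCI bus notification.  For device
 * addition events, path[] records the bus/device/function of each bridge
 * from the root bus down to the device itself, which is what the DMAR
 * device-scope matching below operates on.
 */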
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

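/*
 * Check whether the PCI path recorded in @info matches an ACPI device
 * scope path starting at @bus.  As a workaround, a broken RMRR entry
 * that names only the terminal device is still accepted when that
 * device matches the end of the recorded path.
 */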
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}

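/*
 * Walk one device-scope list from a DRHD/RMRR/ATSR structure and, when a
 * scope entry matches @info, record the new device in the first free slot
 * of @devices.
 */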
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers. However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header. NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

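/*
 * Hook a newly added PCI device into the device-scope lists of every
 * non-INCLUDE_ALL DRHD unit it belongs to, then let the IOMMU and
 * interrupt-remapping code know about it.
 */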
static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	if (ret >= 0)
		intel_irq_remap_add_device(info);

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
			dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
	struct pci_dev *physfn = pci_physfn(pdev);

	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;

	/* Only care about add/remove events for physical functions.
	 * For VFs we actually do the lookup based on the corresponding
	 * PF in device_to_iommu() anyway. */
	if (pdev->is_virtfn) {
		/*
		 * Ensure that the VF device inherits the irq domain of the
		 * PF device. Ideally the device would inherit the domain
		 * from the bus, but DMAR can have multiple units per bus
		 * which makes this impossible. The VF 'bus' could inherit
		 * from the PF device, but that's yet another x86'sism to
		 * inflict on everybody else.
		 */
		if (action == BUS_NOTIFY_ADD_DEVICE)
			vf_inherit_msi_domain(pdev);
		return NOTIFY_DONE;
	}

	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

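/* Look up an already registered DRHD unit by segment and register base. */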
static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
				dmar_rcu_check())
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}

/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	/*
	 * If header is allocated from slab by ACPI _DSM method, we need to
	 * copy the content because the memory buffer will be freed on return.
	 */
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

	/* Check for NUL termination within the designated length */
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		pr_warn(FW_BUG
			"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			dmi_get_system_info(DMI_BIOS_VENDOR),
			dmi_get_system_info(DMI_BIOS_VERSION),
			dmi_get_system_info(DMI_PRODUCT_VERSION));
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
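/*
 * Bind an IOMMU to the NUMA node named by an RHSA (Remapping Hardware
 * Static Affinity) structure, falling back to NUMA_NO_NODE when the
 * proximity domain maps to an offline node.
 */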
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	pr_warn(FW_BUG
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
		/* We don't print this here because we need to sanity-check
		   it first. So print it in dmar_parse_one_andd() instead. */
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* If we can find the DMAR table, then there are DMAR devices. */
	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

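/*
 * Walk the remapping structures that follow the DMAR table header,
 * dispatching each one to the handler registered for its type in @cb
 * while guarding against zero-length and truncated records.
 */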
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again; the earlier dmar_tbl mapping may have been done
	 * with a fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

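/*
 * Return 1 if @dev, or any PCI bridge upstream of it, appears in the
 * given device-scope array.
 */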
static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

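/*
 * Find the DRHD unit that covers @dev: either a unit whose device scope
 * explicitly lists the device (or one of its parent bridges), or the
 * INCLUDE_ALL unit for the device's PCI segment.
 */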
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

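/*
 * Record an ACPI namespace device (from an ANDD entry) in the device
 * scope of the DRHD unit whose NAMESPACE scope entry carries the same
 * enumeration ID.
 */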
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

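/*
 * One-time device-scope setup: attach ACPI namespace devices from ANDD
 * entries and every physical PCI device already enumerated to their
 * DRHD units.  dmar_dev_scope_status caches the result so that repeated
 * calls return it directly.
 */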
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) int __init dmar_dev_scope_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct pci_dev *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct dmar_pci_notify_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (dmar_dev_scope_status != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return dmar_dev_scope_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (list_empty(&dmar_drhd_units)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) dmar_dev_scope_status = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dmar_dev_scope_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dmar_acpi_dev_scope_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) for_each_pci_dev(dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (dev->is_virtfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) info = dmar_alloc_pci_notify_info(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) BUS_NOTIFY_ADD_DEVICE);
			if (!info)
				return dmar_dev_scope_status;

			dmar_pci_bus_add_dev(info);
			dmar_free_pci_notify_info(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return dmar_dev_scope_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
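/*
 * Register the DMAR notifier on the PCI bus so that later hot-add and
 * hot-remove events keep the per-DRHD device scopes up to date.
 */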
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) void __init dmar_register_bus_notifier(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
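/*
 * Parse the DMAR table exactly once. The outcome of the first call is
 * cached in dmar_table_initialized, so later callers simply get 0 on
 * success or the original negative error code.
 */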
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) int __init dmar_table_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) static int dmar_table_initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (dmar_table_initialized == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ret = parse_dmar_table();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (ret != -ENODEV)
				pr_info("Failed to parse DMAR table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else if (list_empty(&dmar_drhd_units)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pr_info("No DMAR devices found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) dmar_table_initialized = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dmar_table_initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static void warn_invalid_dmar(u64 addr, const char *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) pr_warn_once(FW_BUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) "Your BIOS is broken; DMAR reported at address %llx%s!\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) addr, message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dmi_get_system_info(DMI_BIOS_VENDOR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dmi_get_system_info(DMI_BIOS_VERSION),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dmi_get_system_info(DMI_PRODUCT_VERSION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
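/*
 * Sanity-check one DRHD entry: map its register page (with ioremap() or
 * early_ioremap() depending on @arg) and verify that the capability
 * registers do not read back as all ones.
 */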
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int __ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct acpi_dmar_hardware_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) u64 cap, ecap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) drhd = (void *)entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (!drhd->address) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) warn_invalid_dmar(0, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) addr = ioremap(drhd->address, VTD_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cap = dmar_readq(addr + DMAR_CAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ecap = dmar_readq(addr + DMAR_ECAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) iounmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) early_iounmap(addr, VTD_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) warn_invalid_dmar(drhd->address, " returns all ones");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
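/*
 * Early detection of VT-d hardware: parse the DMAR table, validate every
 * reported DRHD unit and, when remapping can be used, mark the IOMMU as
 * detected and hook up the x86 init/shutdown callbacks.
 */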
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int __init detect_intel_iommu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct dmar_res_callback validate_drhd_cb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) .ignore_unhandled = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) down_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ret = dmar_table_detect();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) &validate_drhd_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!ret && !no_iommu && !iommu_detected &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) (!dmar_disabled || dmar_platform_optin())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) iommu_detected = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Make sure ACS will be enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) pci_request_acs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) x86_init.iommu.iommu_init = intel_iommu_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) x86_platform.iommu_shutdown = intel_iommu_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (dmar_tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) acpi_put_table(dmar_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) dmar_tbl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) up_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return ret ? ret : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void unmap_iommu(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) iounmap(iommu->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) release_mem_region(iommu->reg_phys, iommu->reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * map_iommu: map the iommu's registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers. Start with a single page, and
 * possibly expand if that turns out to be insufficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
	int map_size, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) iommu->reg_phys = phys_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) iommu->reg_size = VTD_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) pr_err("Can't reserve memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!iommu->reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) pr_err("Can't map the region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) warn_invalid_dmar(phys_addr, " returns all ones");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (ecap_vcs(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* the registers might be more than one page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cap_max_fault_reg_offset(iommu->cap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) map_size = VTD_PAGE_ALIGN(map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (map_size > iommu->reg_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) iounmap(iommu->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) release_mem_region(iommu->reg_phys, iommu->reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) iommu->reg_size = map_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) iommu->name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pr_err("Can't reserve memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!iommu->reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) pr_err("Can't map the region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) iounmap(iommu->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) release_mem_region(iommu->reg_phys, iommu->reg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
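/*
 * Reserve a free slot in the dmar_seq_ids bitmap and derive the unit name
 * ("dmar%d") from it. Returns the sequence id, or -1 when all
 * DMAR_UNITS_SUPPORTED slots are already taken.
 */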
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) static int dmar_alloc_seq_id(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) DMAR_UNITS_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) iommu->seq_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) set_bit(iommu->seq_id, dmar_seq_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) sprintf(iommu->name, "dmar%d", iommu->seq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return iommu->seq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static void dmar_free_seq_id(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (iommu->seq_id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) clear_bit(iommu->seq_id, dmar_seq_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) iommu->seq_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
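/*
 * Set up an intel_iommu instance for a DRHD unit: allocate a sequence id,
 * map the register set, compute the supported address widths and, for
 * units added after boot, register the IOMMU with sysfs and the IOMMU core.
 */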
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static int alloc_iommu(struct dmar_drhd_unit *drhd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct intel_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) u32 ver, sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int agaw = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int msagaw = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!drhd->reg_base_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) warn_invalid_dmar(0, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (dmar_alloc_seq_id(iommu) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) pr_err("Failed to allocate seq_id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) err = map_iommu(iommu, drhd->reg_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) pr_err("Failed to map %s\n", iommu->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) goto error_free_seq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (cap_sagaw(iommu->cap) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) iommu->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) drhd->ignored = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!drhd->ignored) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) agaw = iommu_calculate_agaw(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (agaw < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) iommu->seq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) drhd->ignored = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!drhd->ignored) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) msagaw = iommu_calculate_max_sagaw(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (msagaw < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) iommu->seq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) drhd->ignored = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) agaw = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) iommu->agaw = agaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) iommu->msagaw = msagaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) iommu->segment = drhd->segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) iommu->node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) ver = readl(iommu->reg + DMAR_VER_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) iommu->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) (unsigned long long)drhd->reg_base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) (unsigned long long)iommu->cap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) (unsigned long long)iommu->ecap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Reflect status in gcmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) sts = readl(iommu->reg + DMAR_GSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (sts & DMA_GSTS_IRES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) iommu->gcmd |= DMA_GCMD_IRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (sts & DMA_GSTS_TES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) iommu->gcmd |= DMA_GCMD_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (sts & DMA_GSTS_QIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) iommu->gcmd |= DMA_GCMD_QIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) raw_spin_lock_init(&iommu->register_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * This is only for hotplug; at boot time intel_iommu_enabled won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * be set yet. When intel_iommu_init() runs, it registers the units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * present at boot time, then sets intel_iommu_enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (intel_iommu_enabled && !drhd->ignored) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) err = iommu_device_sysfs_add(&iommu->iommu, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) intel_iommu_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) "%s", iommu->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) goto err_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) err = iommu_device_register(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) goto err_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) drhd->iommu = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) iommu->drhd = drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) err_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) iommu_device_sysfs_remove(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) err_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unmap_iommu(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) error_free_seq_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) dmar_free_seq_id(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) kfree(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
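/*
 * Tear down everything alloc_iommu() and later setup created: IOMMU core
 * and sysfs registration, interrupts, the invalidation queue, the register
 * mapping, and finally the sequence id and the structure itself.
 */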
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static void free_iommu(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (intel_iommu_enabled && !iommu->drhd->ignored) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) iommu_device_unregister(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) iommu_device_sysfs_remove(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (iommu->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (iommu->pr_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) free_irq(iommu->pr_irq, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dmar_free_hwirq(iommu->pr_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) iommu->pr_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) free_irq(iommu->irq, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) dmar_free_hwirq(iommu->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) iommu->irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (iommu->qi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) free_page((unsigned long)iommu->qi->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) kfree(iommu->qi->desc_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) kfree(iommu->qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (iommu->reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) unmap_iommu(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) dmar_free_seq_id(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) kfree(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
/*
 * Reclaim all submitted descriptors that have completed their work.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static inline void reclaim_free_desc(struct q_inval *qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) while (qi->desc_status[qi->free_tail] == QI_DONE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) qi->desc_status[qi->free_tail] == QI_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) qi->desc_status[qi->free_tail] = QI_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) qi->free_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
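/*
 * Check the fault status register for queued invalidation errors. Returns
 * 0 if the wait descriptor can still complete, -EINVAL if an Invalidation
 * Queue Error (IQE) hit the descriptor at @index, or -EAGAIN if an
 * Invalidation Time-out Error (ITE) aborted the wait descriptor and the
 * submission must be restarted.
 */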
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) u32 fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) int head, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct q_inval *qi = iommu->qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) int shift = qi_shift(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (qi->desc_status[wait_index] == QI_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) fault = readl(iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * If IQE happens, the head points to the descriptor associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * with the error. No new descriptors are fetched until the IQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * is cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (fault & DMA_FSTS_IQE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) head = readl(iommu->reg + DMAR_IQH_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if ((head >> shift) == index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct qi_desc *desc = qi->desc + head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * desc->qw2 and desc->qw3 are either reserved or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * used by software as private data. We won't print
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * out these two qw's for security consideration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) (unsigned long long)desc->qw0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) (unsigned long long)desc->qw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) memcpy(desc, qi->desc + (wait_index << shift),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 1 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * If ITE happens, all pending wait_desc commands are aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * No new descriptors are fetched until the ITE is cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (fault & DMA_FSTS_ITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) head = readl(iommu->reg + DMAR_IQH_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) head |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) tail = readl(iommu->reg + DMAR_IQT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (qi->desc_status[head] == QI_IN_USE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) qi->desc_status[head] = QI_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) head = (head - 2 + QI_LENGTH) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) } while (head != tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (qi->desc_status[wait_index] == QI_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (fault & DMA_FSTS_ICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
/*
 * Function to submit invalidation descriptors of all types to the queued
 * invalidation interface (QI). Multiple descriptors can be submitted at a
 * time; a wait descriptor is appended to each submission to ensure that
 * hardware has completed the invalidation before returning. Wait descriptors
 * can be part of the submission, but they will not be polled for completion.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) unsigned int count, unsigned long options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct q_inval *qi = iommu->qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct qi_desc wait_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int wait_index, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int offset, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (!qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) raw_spin_lock_irqsave(&qi->q_lock, flags);
	/*
	 * Check if we have enough empty slots in the queue to submit;
	 * the calculation is based on:
	 * # of desc + 1 wait desc + 1 space between head and tail
	 */
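	/* e.g. submitting a single descriptor needs at least three free slots. */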
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) while (qi->free_cnt < count + 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) raw_spin_unlock_irqrestore(&qi->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) raw_spin_lock_irqsave(&qi->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) index = qi->free_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) wait_index = (index + count) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) shift = qi_shift(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) offset = ((index + i) % QI_LENGTH) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) memcpy(qi->desc + offset, &desc[i], 1 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) qi->desc_status[wait_index] = QI_IN_USE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (options & QI_OPT_WAIT_DRAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) wait_desc.qw2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) wait_desc.qw3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) offset = wait_index << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) memcpy(qi->desc + offset, &wait_desc, 1 << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) qi->free_cnt -= count + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
	/*
	 * Update the HW tail register to indicate the presence of
	 * new descriptors.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave the interrupts disabled to prevent the interrupt
		 * context from queueing another cmd while one is already
		 * submitted and waiting for completion on this cpu. This
		 * avoids a deadlock where the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) rc = qi_check_fault(iommu, index, wait_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) raw_spin_unlock(&qi->q_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) raw_spin_lock(&qi->q_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) reclaim_free_desc(qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) raw_spin_unlock_irqrestore(&qi->q_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (rc == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
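
/*
 * Illustrative caller sketch (not part of this file): build a descriptor
 * and submit it synchronously, assuming @iommu has queued invalidation
 * enabled:
 *
 *	struct qi_desc desc = {0};
 *
 *	desc.qw0 = QI_IEC_TYPE;
 *	qi_submit_sync(iommu, &desc, 1, 0);
 *
 * qi_global_iec() below is the in-tree equivalent of this sketch.
 */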
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * Flush the global interrupt entry cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) void qi_global_iec(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct qi_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) desc.qw0 = QI_IEC_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) desc.qw1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) desc.qw2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) desc.qw3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* should never fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct qi_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) | QI_CC_GRAN(type) | QI_CC_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) desc.qw1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) desc.qw2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) desc.qw3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) unsigned int size_order, u64 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) u8 dw = 0, dr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct qi_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int ih = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (cap_write_drain(iommu->cap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) dw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (cap_read_drain(iommu->cap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) dr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) | QI_IOTLB_AM(size_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) desc.qw2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) desc.qw3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) u16 qdep, u64 addr, unsigned mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct qi_desc desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else {
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (qdep >= QI_DEV_IOTLB_MAX_INVS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) qdep = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) desc.qw2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) desc.qw3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* PASID-based IOTLB invalidation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) unsigned long npages, bool ih)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
	/*
	 * npages == -1 means a PASID-selective invalidation; otherwise a
	 * positive value requests a Page-selective-within-PASID invalidation.
	 * 0 is not a valid input.
	 */
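	/* e.g. npages == 4 gives mask == 2 below, i.e. a 16KB, 4-page range. */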
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (WARN_ON(!npages)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) pr_err("Invalid input npages = %ld\n", npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (npages == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) desc.qw0 = QI_EIOTLB_PASID(pasid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) QI_EIOTLB_DID(did) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) QI_EIOTLB_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) desc.qw1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) int mask = ilog2(__roundup_pow_of_two(npages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) addr = ALIGN_DOWN(addr, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) desc.qw0 = QI_EIOTLB_PASID(pasid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) QI_EIOTLB_DID(did) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) QI_EIOTLB_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) desc.qw1 = QI_EIOTLB_ADDR(addr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) QI_EIOTLB_IH(ih) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) QI_EIOTLB_AM(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* PASID-based device IOTLB Invalidate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) u32 pasid, u16 qdep, u64 addr, unsigned int size_order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) QI_DEV_IOTLB_PFSID(pfsid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
	/*
	 * If the S bit is 0, we only flush a single page. If the S bit is
	 * set, the least significant zero bit indicates the invalidation
	 * address range (VT-d spec 6.5.2.6), e.g. a zero at address bit 12
	 * indicates 8KB and a zero at bit 13 indicates 16KB. size_order == 0
	 * means a single 4KB page.
	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
	 * ECAP.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) addr, size_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* Take page address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) desc.qw1 = QI_DEV_EIOTLB_ADDR(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (size_order) {
		/*
		 * Any existing zero bits in the address below size_order
		 * would otherwise be read as the least significant zero bit;
		 * set them all to 1 to avoid indicating a smaller range than
		 * desired.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) VTD_PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Clear size_order bit to indicate size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) desc.qw1 &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /* Set the S bit to indicate flushing more than 1 page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) desc.qw1 |= QI_DEV_EIOTLB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
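
	/*
	 * Worked example, assuming VTD_PAGE_SHIFT == 12: with size_order == 2
	 * the GENMASK sets address bits 13:12 and ~mask then clears bit 13,
	 * so the lowest zero bit of the address field is bit 13, which per
	 * the encoding above denotes a 16KB (four page) invalidation range.
	 */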
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
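/* PASID-cache invalidation for the given domain ID, PASID and granularity. */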
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) u64 granu, u32 pasid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) QI_PC_GRAN(granu) | QI_PC_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) qi_submit_sync(iommu, &desc, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * Disable Queued Invalidation interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) void dmar_disable_qi(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) u32 sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) cycles_t start_time = get_cycles();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (!ecap_qis(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) raw_spin_lock_irqsave(&iommu->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) sts = readl(iommu->reg + DMAR_GSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (!(sts & DMA_GSTS_QIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
	/*
	 * Give the hardware a chance to complete the pending invalidation
	 * requests.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) while ((readl(iommu->reg + DMAR_IQT_REG) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) readl(iommu->reg + DMAR_IQH_REG)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) iommu->gcmd &= ~DMA_GCMD_QIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) !(sts & DMA_GSTS_QIES), sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * Enable queued invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static void __dmar_enable_qi(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct q_inval *qi = iommu->qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) u64 val = virt_to_phys(qi->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) qi->free_head = qi->free_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) qi->free_cnt = QI_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability is
	 * present: DW (bit 11) selects 256-bit descriptors and QS=1 requests
	 * a two-page invalidation queue.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (ecap_smts(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) val |= (1 << 11) | 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) raw_spin_lock_irqsave(&iommu->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /* write zero to the tail reg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) writel(0, iommu->reg + DMAR_IQT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) iommu->gcmd |= DMA_GCMD_QIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
	/* Make sure hardware completes it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /*
 * Enable the Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register-based IOTLB invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int dmar_enable_qi(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct q_inval *qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct page *desc_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (!ecap_qis(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
	/*
	 * Queued invalidation is already set up and enabled.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (iommu->qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (!iommu->qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) qi = iommu->qi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * Need two pages (order 1) for 256 descriptors of 256 bits each when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * scalable mode is supported; one page of 128-bit descriptors otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) !!ecap_smts(iommu->ecap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (!desc_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) kfree(qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) iommu->qi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) qi->desc = page_address(desc_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!qi->desc_status) {
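/* The descriptor queue may be an order-1 allocation in scalable mode. */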
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) free_pages((unsigned long) qi->desc, !!ecap_smts(iommu->ecap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) kfree(qi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) iommu->qi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) raw_spin_lock_init(&qi->q_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) __dmar_enable_qi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* IOMMU interrupt handling. Most of it is MSI-like. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) enum faulttype {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) DMA_REMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) INTR_REMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) UNKNOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) static const char * const dma_remap_fault_reasons[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) "Software",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) "Present bit in root entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) "Present bit in context entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) "Invalid context entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) "Access beyond MGAW",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) "PTE Write access is not set",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) "PTE Read access is not set",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) "Next page table ptr is invalid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "Root table address invalid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) "Context table ptr is invalid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) "non-zero reserved fields in RTP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) "non-zero reserved fields in CTP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) "non-zero reserved fields in PTE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) "PCE for translation request specifies blocking",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
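/* Scalable-mode fault reasons, indexed from fault reason code 0x30. */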
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static const char * const dma_remap_sm_fault_reasons[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) "SM: Invalid Root Table Address",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) "SM: TTM 0 for request with PASID",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) "SM: TTM 0 for page group request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) "SM: Error attempting to access Root Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) "SM: Present bit in Root Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) "SM: Non-zero reserved field set in Root Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) "SM: Error attempting to access Context Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) "SM: Present bit in Context Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) "SM: Non-zero reserved field set in the Context Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) "SM: Invalid Context Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) "SM: DTE field in Context Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) "SM: PASID Enable field in Context Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) "SM: PASID is larger than the max in Context Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) "SM: PRE field in Context-Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) "SM: RID_PASID field error in Context-Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "SM: Error attempting to access the PASID Directory Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) "SM: Present bit in Directory Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) "SM: Non-zero reserved field set in PASID Directory Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) "SM: Error attempting to access PASID Table Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) "SM: Present bit in PASID Table Entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) "SM: Non-zero reserved field set in PASID Table Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) "SM: Invalid Scalable-Mode PASID Table Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) "SM: ERE field is clear in PASID Table Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) "SM: SRE field is clear in PASID Table Entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) "Unknown", "Unknown",/* 0x5E-0x5F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) "SM: Error attempting to access first-level paging entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) "SM: Present bit in first-level paging entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) "SM: Non-zero reserved field set in first-level paging entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) "SM: Error attempting to access FL-PML4 entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) "SM: First-level entry address beyond MGAW in Nested translation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) "SM: Read permission error in FL-PML4 entry in Nested translation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) "SM: Read permission error in first-level paging entry in Nested translation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) "SM: Write permission error in first-level paging entry in Nested translation",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) "SM: Error attempting to access second-level paging entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) "SM: Read/Write permission error in second-level paging entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) "SM: Non-zero reserved field set in second-level paging entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) "SM: Invalid second-level page table pointer",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) "SM: A/D bit update needed in second-level entry when set up in no snoop",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) "SM: Address in first-level translation is not canonical",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) "SM: U/S set 0 for first-level translation with user privilege",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) "SM: No execute permission for request with PASID and ER=1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) "SM: Address beyond the DMA hardware max",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) "SM: Second-level entry address beyond the max",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) "SM: No write permission for Write/AtomicOp request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) "SM: No read permission for Read/AtomicOp request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) "SM: Invalid address-interrupt address",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) "SM: A/D bit update needed in first-level entry when set up in no snoop",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
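/* Interrupt-remapping fault reasons, indexed from fault reason code 0x20. */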
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static const char * const irq_remap_fault_reasons[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) "Detected reserved fields in the decoded interrupt-remapped request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) "Interrupt index exceeded the interrupt-remapping table size",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) "Present field in the IRTE entry is clear",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) "Error accessing interrupt-remapping table pointed by IRTA_REG",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) "Detected reserved fields in the IRTE entry",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) "Blocked a compatibility format interrupt request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) "Blocked an interrupt request due to source-id verification failure",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (fault_reason >= 0x20 && (fault_reason - 0x20 <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ARRAY_SIZE(irq_remap_fault_reasons))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) *fault_type = INTR_REMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return irq_remap_fault_reasons[fault_reason - 0x20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) *fault_type = DMA_REMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return dma_remap_sm_fault_reasons[fault_reason - 0x30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) *fault_type = DMA_REMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return dma_remap_fault_reasons[fault_reason];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) *fault_type = UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return "Unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (iommu->irq == irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) return DMAR_FECTL_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) else if (iommu->pr_irq == irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) return DMAR_PECTL_REG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) void dmar_msi_unmask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) int reg = dmar_msi_reg(iommu, data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /* unmask it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) writel(0, iommu->reg + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /* Read back the register to flush the posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) readl(iommu->reg + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) void dmar_msi_mask(struct irq_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) int reg = dmar_msi_reg(iommu, data->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* mask it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) writel(DMA_FECTL_IM, iommu->reg + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /* Read back the register to flush the posted write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) readl(iommu->reg + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
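/*
* The MSI data, address and upper-address registers sit at fixed offsets
* (+4, +8 and +12 bytes) from the control register chosen by dmar_msi_reg().
*/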
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) void dmar_msi_write(int irq, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct intel_iommu *iommu = irq_get_handler_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) int reg = dmar_msi_reg(iommu, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) writel(msg->data, iommu->reg + reg + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) writel(msg->address_lo, iommu->reg + reg + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) writel(msg->address_hi, iommu->reg + reg + 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) void dmar_msi_read(int irq, struct msi_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct intel_iommu *iommu = irq_get_handler_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int reg = dmar_msi_reg(iommu, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) msg->data = readl(iommu->reg + reg + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) msg->address_lo = readl(iommu->reg + reg + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) msg->address_hi = readl(iommu->reg + reg + 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) u8 fault_reason, u32 pasid, u16 source_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) unsigned long long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) const char *reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) int fault_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) reason = dmar_get_fault_reason(fault_reason, &fault_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (fault_type == INTR_REMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) source_id >> 8, PCI_SLOT(source_id & 0xFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) PCI_FUNC(source_id & 0xFF), addr >> 48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) fault_reason, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) type ? "DMA Read" : "DMA Write",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) source_id >> 8, PCI_SLOT(source_id & 0xFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) PCI_FUNC(source_id & 0xFF), pasid, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) fault_reason, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) #define PRIMARY_FAULT_REG_LEN (16)
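/*
* Each fault recording register is 16 bytes: the faulting page address in
* the low qword, the source-id and PASID-present bit in the dword at offset
* 8, and the fault reason, type, PASID value and F (fault) bit in the dword
* at offset 12, as read back below.
*/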
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) irqreturn_t dmar_fault(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct intel_iommu *iommu = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int reg, fault_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) u32 fault_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static DEFINE_RATELIMIT_STATE(rs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) DEFAULT_RATELIMIT_INTERVAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) DEFAULT_RATELIMIT_BURST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) fault_status = readl(iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) if (fault_status && __ratelimit(&rs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) pr_err("DRHD: handling fault status reg %x\n", fault_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /* TBD: the advanced fault log is not handled yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (!(fault_status & DMA_FSTS_PPF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) goto unlock_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) fault_index = dma_fsts_fault_record_index(fault_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) reg = cap_fault_reg_offset(iommu->cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /* Disable printing, simply clear the fault when ratelimited */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) bool ratelimited = !__ratelimit(&rs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) u8 fault_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) u16 source_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) u64 guest_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) u32 pasid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) bool pasid_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) /* upper 32 bits of the fault record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) data = readl(iommu->reg + reg +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) fault_index * PRIMARY_FAULT_REG_LEN + 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (!(data & DMA_FRCD_F))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (!ratelimited) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) fault_reason = dma_frcd_fault_reason(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) type = dma_frcd_type(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) pasid = dma_frcd_pasid_value(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) data = readl(iommu->reg + reg +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) fault_index * PRIMARY_FAULT_REG_LEN + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) source_id = dma_frcd_source_id(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) pasid_present = dma_frcd_pasid_present(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) guest_addr = dmar_readq(iommu->reg + reg +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) fault_index * PRIMARY_FAULT_REG_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) guest_addr = dma_frcd_page_addr(guest_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /* clear the fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) writel(DMA_FRCD_F, iommu->reg + reg +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) fault_index * PRIMARY_FAULT_REG_LEN + 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (!ratelimited)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /* Use PASID -1 when no PASID is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) dmar_fault_do_one(iommu, type, fault_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) pasid_present ? pasid : -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) source_id, guest_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) fault_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (fault_index >= cap_num_fault_regs(iommu->cap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) fault_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) raw_spin_lock_irqsave(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
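/* Acknowledge fault overflow, pending fault and page request overflow status. */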
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) unlock_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) int dmar_set_interrupt(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) int irq, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * Check if the fault interrupt is already initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (iommu->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) iommu->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) pr_err("No free IRQ vectors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) pr_err("Can't request irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) int __init enable_drhd_fault_handling(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct dmar_drhd_unit *drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct intel_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * Enable fault control interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) for_each_iommu(iommu, drhd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) u32 fault_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) int ret = dmar_set_interrupt(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) (unsigned long long)drhd->reg_base_addr, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * Clear any previous faults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) dmar_fault(iommu->irq, iommu);
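/* Fault status bits are write-1-to-clear; writing back the value clears them. */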
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) fault_status = readl(iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) writel(fault_status, iommu->reg + DMAR_FSTS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * Re-enable Queued Invalidation interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) int dmar_reenable_qi(struct intel_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (!ecap_qis(iommu->ecap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (!iommu->qi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * First disable queued invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) dmar_disable_qi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * Then enable queued invalidation again. Since there are no pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * invalidation requests now, it's safe to re-enable queued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * invalidation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) __dmar_enable_qi(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * Check whether the DMAR table advertises interrupt remapping support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) int __init dmar_ir_support(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) struct acpi_table_dmar *dmar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) dmar = (struct acpi_table_dmar *)dmar_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) if (!dmar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return 0;
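/* Bit 0 of the DMAR table flags indicates interrupt remapping support. */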
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return dmar->flags & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* Check whether DMAR units are in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static inline bool dmar_in_use(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return irq_remapping_enabled || intel_iommu_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static int __init dmar_free_unused_resources(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) struct dmar_drhd_unit *dmaru, *dmaru_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (dmar_in_use())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) down_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) list_del(&dmaru->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) dmar_free_drhd(dmaru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) up_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) late_initcall(dmar_free_unused_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) IOMMU_INIT_POST(detect_intel_iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * DMAR Hotplug Support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * For more details, please refer to Intel(R) Virtualization Technology
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * "Remapping Hardware Unit Hot Plug".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static guid_t dmar_hp_guid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * Currently there is only one revision and the BIOS does not check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * revision ID, so use 0 for safety.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) #define DMAR_DSM_REV_ID 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) #define DMAR_DSM_FUNC_DRHD 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) #define DMAR_DSM_FUNC_ATSR 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) #define DMAR_DSM_FUNC_RHSA 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) static inline bool dmar_detect_dsm(acpi_handle handle, int func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
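/*
* Evaluate DMAR _DSM function @func on @handle and walk the remapping
* structures in the returned buffer, invoking @handler on entries whose
* type corresponds to @func.
*/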
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static int dmar_walk_dsm_resource(acpi_handle handle, int func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) dmar_res_handler_t handler, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) union acpi_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct acpi_dmar_header *start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) struct dmar_res_callback callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static int res_type[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (!dmar_detect_dsm(handle, func))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) func, NULL, ACPI_TYPE_BUFFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) memset(&callback, 0, sizeof(callback));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) callback.cb[res_type[func]] = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) callback.arg[res_type[func]] = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) start = (struct acpi_dmar_header *)obj->buffer.pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) ACPI_FREE(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct dmar_drhd_unit *dmaru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (!dmaru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) ret = dmar_ir_hotplug(dmaru, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) ret = dmar_iommu_hotplug(dmaru, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct dmar_drhd_unit *dmaru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (!dmaru)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * All PCI devices managed by this unit should have been destroyed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) for_each_active_dev_scope(dmaru->devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) dmaru->devices_cnt, i, dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) ret = dmar_ir_hotplug(dmaru, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) ret = dmar_iommu_hotplug(dmaru, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) struct dmar_drhd_unit *dmaru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (dmaru) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) list_del_rcu(&dmaru->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) dmar_free_drhd(dmaru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
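/*
* Hot-add sequence: validate and parse the DRHD structures, then the RHSA
* and ATSR entries, and finally bring the new units online. On failure the
* release_atsr/release_drhd labels unwind the work in reverse order.
*/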
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) static int dmar_hotplug_insert(acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) int drhd_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) &dmar_validate_one_drhd, (void *)1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) &dmar_parse_one_drhd, (void *)&drhd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (ret == 0 && drhd_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) } else if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) goto release_drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) &dmar_parse_one_rhsa, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) goto release_drhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) &dmar_parse_one_atsr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) goto release_atsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) &dmar_hp_add_drhd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) &dmar_hp_remove_drhd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) release_atsr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) &dmar_release_one_atsr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) release_drhd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) &dmar_hp_release_drhd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
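/*
* Hot-remove sequence: verify the ATSR state, detach the DRHD units, then
* release the ATSR and DRHD resources. If detaching fails, the units are
* added back to restore the previous state.
*/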
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) static int dmar_hotplug_remove(acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) &dmar_check_one_atsr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) &dmar_hp_remove_drhd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) &dmar_release_one_atsr, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) &dmar_hp_release_drhd, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) &dmar_hp_add_drhd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) void *context, void **retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) acpi_handle *phdl = retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) *phdl = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return AE_CTRL_TERMINATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return AE_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static int dmar_device_hotplug(acpi_handle handle, bool insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) acpi_handle tmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (!dmar_in_use())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
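/*
* Use @handle directly if it implements the DMAR _DSM; otherwise search
* the namespace below it for the first device that does.
*/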
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) tmp = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ACPI_UINT32_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) dmar_get_dsm_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) NULL, NULL, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (ACPI_FAILURE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) pr_warn("Failed to locate _DSM method.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if (tmp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) down_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ret = dmar_hotplug_insert(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) ret = dmar_hotplug_remove(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) up_write(&dmar_global_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) int dmar_device_add(acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return dmar_device_hotplug(handle, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) int dmar_device_remove(acpi_handle handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return dmar_device_hotplug(handle, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * the ACPI DMAR table. This means that the platform boot firmware has made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * sure no device can issue DMA outside of RMRR regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) bool dmar_platform_optin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct acpi_table_dmar *dmar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) acpi_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) status = acpi_get_table(ACPI_SIG_DMAR, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) (struct acpi_table_header **)&dmar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (ACPI_FAILURE(status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) acpi_put_table((struct acpi_table_header *)dmar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) EXPORT_SYMBOL_GPL(dmar_platform_optin);