// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 * and Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
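
/*
 * Example (kernel command line):
 *
 *	iommu=novmerge	- disable virtual merging of scatter/gather entries
 *	iommu=vmerge	- enable virtual merging (the default)
 *
 * Other iommu= keywords may be consumed by platform code elsewhere; only
 * the two values above are handled here.
 */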

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads,
 * and with 4 pools all primary threads would otherwise map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
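
/*
 * Illustration (hypothetical numbers): assuming IOMMU_POOL_HASHBITS is 2,
 * i.e. four pools, a naive "cpu & 3" mapping on an SMT4 system would send
 * the primary threads (CPUs 0, 4, 8, 12, ...) all to pool 0, because their
 * low two bits are identical. hash_32() is a multiplicative hash that
 * effectively keeps the high bits of (cpu * constant), so such regular CPU
 * numbers get scattered across the pools instead, which is exactly what
 * the comment above is after.
 */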

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
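
/*
 * Rough usage sketch for the fault injection hooks above (paths and knobs
 * assume the generic fault-injection framework, see
 * Documentation/fault-injection/ for the full list of attributes):
 *
 *	# mark one device as eligible for injected IOMMU mapping failures
 *	echo 1 > /sys/bus/pci/devices/<dev>/fail_iommu
 *	# (VIO devices have the same attribute under /sys/bus/vio/devices/)
 *
 *	# then tune the shared attributes under debugfs, e.g.
 *	echo 10 > /sys/kernel/debug/fail_iommu/probability
 *	echo 100 > /sys/kernel/debug/fail_iommu/interval
 *
 * The shared attributes can also be seeded at boot via
 * fail_iommu=<interval>,<probability>,<space>,<times>.
 */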

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
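
/*
 * Allocation strategy recap for iommu_range_alloc(), with a worked example
 * of the hint rounding above (the numbers are made up):
 *
 * Pass 0 searches the chosen pool from its hint (or *handle), pass 1
 * rescans the same pool from its start, subsequent passes walk the other
 * pools in turn, and only after every pool has been tried do we return
 * DMA_MAPPING_ERROR.
 *
 * For small allocations the hint is rounded up to the next it_blocksize
 * boundary, e.g. with it_blocksize = 16 and end = 37:
 *
 *	(37 + 16 - 1) & ~(16 - 1) = 52 & ~15 = 48
 *
 * so the next small allocation starts on a fresh block at entry 48.
 */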

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
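
/*
 * Worked example of the entry -> bus address conversion above (all values
 * are made up): with it_page_shift = 12 (4K IOMMU pages) and
 * it_offset = 0x800, an allocator result of 0x10 becomes TCE entry 0x810
 * and hence the bus address 0x810000. The sub-page offset of the original
 * buffer is ORed back in by the callers below.
 */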

static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p vaddr %lx npages %lu\n",
					 tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - vmerge is disabled (iommu=novmerge),
			 * - the allocated dma_addr isn't contiguous with the
			 *   previous segment, or
			 * - the merged segment would exceed the device's
			 *   max segment size.
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}
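
/*
 * Virtual merging in a nutshell (illustrative addresses): if two
 * scatterlist entries end up in consecutive TCE slots, say 4K at bus
 * address 0x10000 followed by 4K at 0x11000, the second entry is folded
 * into the first and the caller sees a single 8K segment at 0x10000.
 * This only happens when novmerge is clear, the bus addresses really are
 * contiguous, and the merged length stays within the device's
 * dma_get_max_seg_size(). When entries are merged, outcount ends up
 * smaller than incount and the first unused entry is terminated above
 * with dma_length = 0 and dma_address = DMA_MAPPING_ERROR.
 */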

void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware-assisted dump, the system goes through a clean
	 * reboot process at the time of a system crash. Hence it's safe to
	 * clear the TCE entries if firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
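
/*
 * Note on the kdump path above: the idea is that devices owned by the
 * crashed kernel may still have DMA in flight, so the second kernel keeps
 * the old TCEs and merely marks them as in use in the bitmap instead of
 * tearing them down, only reclaiming the top KDUMP_MIN_TCE_ENTRIES entries
 * when the table would otherwise be full.
 */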

static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that consider page 0 to be invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	/* If the non-empty range res_start..res_end lies entirely outside
	 * the table, there is nothing to mark as reserved.
	 */
	if (res_start && res_end &&
			(tbl->it_offset + tbl->it_size < res_start ||
			 res_end < tbl->it_offset))
		return;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}

static void iommu_table_release_pages(struct iommu_table *tbl)
{
	int i;

	/*
	 * In case we have reserved the first bit, clear it again so that
	 * the bitmap_empty() check in iommu_table_free() does not warn
	 * about it.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		clear_bit(i - tbl->it_offset, tbl->it_map);
}

/*
 * Build an iommu_table structure. This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
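
/*
 * Pool layout example (made-up numbers): with IOMMU_NR_POOLS == 4 and
 * it_size = 0x100000 entries, poolsize = (0x100000 * 3 / 4) / 4 = 0x30000,
 * so the small pools cover [0, 0x30000), [0x30000, 0x60000),
 * [0x60000, 0x90000) and [0x90000, 0xc0000), while the large pool gets
 * the top quarter, [0xc0000, 0x100000). Allocations of more than 15 pages
 * go straight to the large pool (see iommu_range_alloc()).
 */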

static void iommu_table_free(struct kref *kref)
{
	unsigned long bitmap_sz;
	unsigned int order;
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_table_release_pages(tbl);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);

/* Creates TCEs for a user-provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p vaddr %p npages %d\n",
					 tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) size_t size, enum dma_data_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) unsigned int npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) BUG_ON(direction == DMA_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) npages = iommu_num_pages(dma_handle, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) IOMMU_PAGE_SIZE(tbl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) iommu_free(tbl, dma_handle, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
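/*
 * Usage sketch (hypothetical caller): a bus layer mapping a single
 * streaming buffer with iommu_map_page() and tearing it down with
 * iommu_unmap_page(). "pdev", "tbl", "buf" and "len" are placeholders,
 * and "buf" must be contiguous lowmem as noted above.
 *
 *	dma_addr_t handle;
 *
 *	handle = iommu_map_page(&pdev->dev, tbl, virt_to_page(buf),
 *				offset_in_page(buf), len,
 *				dma_get_mask(&pdev->dev), DMA_TO_DEVICE, 0);
 *	if (handle == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	(device performs DMA reads from "handle" here)
 *	iommu_unmap_page(tbl, handle, len, DMA_TO_DEVICE, 0);
 */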
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Allocates a contiguous real buffer and creates mappings over it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Returns the virtual address of the buffer and sets dma_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * to the dma address (mapping) of the first page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) size_t size, dma_addr_t *dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) unsigned long mask, gfp_t flag, int node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) void *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned int nio_pages, io_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) order = get_order(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * The client asked for far too much space. This is checked again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * later anyway, but failing here is easier for drivers to debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * than a failure deep in the TCE tables.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (order >= IOMAP_MAX_ORDER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Alloc enough pages (and possibly more) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) page = alloc_pages_node(node, flag, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ret = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) memset(ret, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* Set up tces to cover the allocated range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) nio_pages = size >> tbl->it_page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) io_order = get_iommu_order(size, tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) mask >> tbl->it_page_shift, io_order, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (mapping == DMA_MAPPING_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) free_pages((unsigned long)ret, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) *dma_handle = mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) void iommu_free_coherent(struct iommu_table *tbl, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) void *vaddr, dma_addr_t dma_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (tbl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) unsigned int nio_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) nio_pages = size >> tbl->it_page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) iommu_free(tbl, dma_handle, nio_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) free_pages((unsigned long)vaddr, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
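/*
 * Usage sketch (hypothetical caller): allocating and freeing a coherent
 * descriptor ring through this table. "dev", "tbl" and RING_SIZE are
 * placeholders.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = iommu_alloc_coherent(dev, tbl, RING_SIZE, &ring_dma,
 *				    dev->coherent_dma_mask, GFP_KERNEL,
 *				    dev_to_node(dev));
 *	if (!ring)
 *		return -ENOMEM;
 *	(use the ring; the device addresses it via ring_dma)
 *	iommu_free_coherent(tbl, RING_SIZE, ring, ring_dma);
 */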
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) switch (dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) case DMA_BIDIRECTIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return TCE_PCI_READ | TCE_PCI_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return TCE_PCI_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return TCE_PCI_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) #ifdef CONFIG_IOMMU_API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * SPAPR TCE API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static void group_release(void *iommu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct iommu_table_group *table_group = iommu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) table_group->group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) void iommu_register_group(struct iommu_table_group *table_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int pci_domain_number, unsigned long pe_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct iommu_group *grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) grp = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (IS_ERR(grp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) PTR_ERR(grp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) table_group->group = grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) iommu_group_set_iommudata(grp, table_group, group_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) pci_domain_number, pe_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) iommu_group_set_name(grp, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
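/*
 * Usage sketch (hypothetical platform code): a PE owner registers its
 * table group and then publishes its TCE table(s). "phb", "pe" and
 * "tbl" stand for platform-specific state.
 *
 *	iommu_register_group(&pe->table_group, phb->global_number,
 *			     pe->pe_number);
 *	pe->table_group.tables[0] = tbl;
 */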
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) enum dma_data_direction iommu_tce_direction(unsigned long tce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) else if (tce & TCE_PCI_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) else if (tce & TCE_PCI_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) EXPORT_SYMBOL_GPL(iommu_tce_direction);
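/*
 * For the non-DMA_NONE cases iommu_direction_to_tce_perm() and
 * iommu_tce_direction() round-trip, e.g.:
 *
 *	unsigned long perm = iommu_direction_to_tce_perm(DMA_TO_DEVICE);
 *
 * leaves perm == TCE_PCI_READ, and iommu_tce_direction(perm) maps it
 * back to DMA_TO_DEVICE.
 */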
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) void iommu_flush_tce(struct iommu_table *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Flush/invalidate TLB caches if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (tbl->it_ops->flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) tbl->it_ops->flush(tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* Make sure updates are seen by hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) EXPORT_SYMBOL_GPL(iommu_flush_tce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int iommu_tce_check_ioba(unsigned long page_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) unsigned long offset, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) unsigned long ioba, unsigned long npages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) unsigned long mask = (1UL << page_shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ioba & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ioba >>= page_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (ioba < offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if ((ioba + npages) > (offset + size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) unsigned long mask = (1UL << page_shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (gpa & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
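/*
 * Worked example: with 4K IO pages (page_shift == 12) and a window of
 * "size" IO pages starting at IO page "offset", the ioba must be
 * IO-page aligned and its page range must not run past the end of the
 * window. iommu_tce_check_gpa() only checks that the guest physical
 * address is IO-page aligned.
 *
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0xfff000, 1)  returns 0
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0x1000000, 1) returns -EINVAL
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0x1234, 1)    returns -EINVAL
 *	iommu_tce_check_gpa(12, 0x1234)                   returns -EINVAL
 */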
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) long iommu_tce_xchg_no_kill(struct mm_struct *mm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct iommu_table *tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) unsigned long entry, unsigned long *hpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) enum dma_data_direction *direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) unsigned long size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
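/*
 * Exchange the TCE. On success, if the old mapping allowed the device
 * to write (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL) and the target is
 * regular memory rather than device memory, mark the page dirty so the
 * state is not lost when the page is later unpinned.
 */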
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (!ret && ((*direction == DMA_FROM_DEVICE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) (*direction == DMA_BIDIRECTIONAL)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) &size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) void iommu_tce_kill(struct iommu_table *tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) unsigned long entry, unsigned long pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (tbl->it_ops->tce_kill)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) tbl->it_ops->tce_kill(tbl, entry, pages, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) EXPORT_SYMBOL_GPL(iommu_tce_kill);
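/*
 * Usage sketch (hypothetical caller): the "no kill" exchange is meant
 * to be batched, with one IO TLB invalidation for the whole run. Error
 * handling and unpinning of the old pages are omitted; "mm", "tbl",
 * "entry" and "pages" stand for the caller's context.
 *
 *	for (i = 0; i < pages; i++) {
 *		unsigned long hpa = 0;
 *		enum dma_data_direction dir = DMA_NONE;
 *
 *		iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dir);
 *	}
 *	iommu_tce_kill(tbl, entry, pages);
 */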
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int iommu_take_ownership(struct iommu_table *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * VFIO does not control TCE entry allocation and the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * can write new TCEs on top of existing ones, so iommu_tce_build()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * must be able to release old pages. This requires the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * xchg_no_kill() callback; if it is not implemented, we disallow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * taking ownership of the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (!tbl->it_ops->xchg_no_kill)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) spin_lock_irqsave(&tbl->large_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) for (i = 0; i < tbl->nr_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) iommu_table_release_pages(tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) pr_err("iommu_tce: it_map is not empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) tbl->it_reserved_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) } else {
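/* Mark every entry busy so the kernel's allocator stays out of this table */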
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) memset(tbl->it_map, 0xff, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) for (i = 0; i < tbl->nr_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_unlock(&tbl->pools[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) EXPORT_SYMBOL_GPL(iommu_take_ownership);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) void iommu_release_ownership(struct iommu_table *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_lock_irqsave(&tbl->large_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) for (i = 0; i < tbl->nr_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) memset(tbl->it_map, 0, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) tbl->it_reserved_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) for (i = 0; i < tbl->nr_pools; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) spin_unlock(&tbl->pools[i].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) EXPORT_SYMBOL_GPL(iommu_release_ownership);
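/*
 * Usage sketch: a typical exclusive-use sequence. While ownership is
 * held the it_map is fully busy, so the kernel DMA path cannot hand out
 * entries; the new owner drives the table through the xchg_no_kill and
 * tce_kill hooks and returns it when done.
 *
 *	ret = iommu_take_ownership(tbl);
 *	if (ret)
 *		return ret;
 *	(program and clear TCEs via iommu_tce_xchg_no_kill())
 *	iommu_release_ownership(tbl);
 */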
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * The sysfs entries should be populated before binding to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * IOMMU group. If the sysfs entries aren't ready yet, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * simply bail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!device_is_registered(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (device_iommu_mapped(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) pr_debug("%s: Skipping device %s with iommu group %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) __func__, dev_name(dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) iommu_group_id(dev->iommu_group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) pr_debug("%s: Adding %s to iommu group %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) __func__, dev_name(dev), iommu_group_id(table_group->group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return iommu_group_add_device(table_group->group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) EXPORT_SYMBOL_GPL(iommu_add_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) void iommu_del_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * Some devices might not have an IOMMU table and group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * in which case there is nothing to detach from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * associated IOMMU group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!device_iommu_mapped(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) pr_debug("iommu_tce: skipping device %s with no tbl\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) iommu_group_remove_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) EXPORT_SYMBOL_GPL(iommu_del_device);
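/*
 * Usage sketch (hypothetical platform code): attach a newly registered
 * device to its PE's group and detach it on removal. "pe" and "pdev"
 * are placeholders for platform-specific state.
 *
 *	ret = iommu_add_device(&pe->table_group, &pdev->dev);
 *	if (ret && ret != -EBUSY)
 *		dev_warn(&pdev->dev, "failed to add to IOMMU group: %d\n", ret);
 *	(... later, on removal ...)
 *	iommu_del_device(&pdev->dev);
 */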
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) #endif /* CONFIG_IOMMU_API */