// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
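
/*
 * Each supported size is a power of two, so the SZ_* constants can be
 * OR'ed together directly to form the pgsize bitmap the IOMMU core
 * expects: bit k set means that 2^k byte pages are supported.
 */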

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
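
/*
 * Worked example (illustrative value): MMU_LOCK = 0x1470 decodes as
 * MMU_LOCK_BASE(0x1470) = (0x1470 & 0x7c00) >> 10 = 5 and
 * MMU_LOCK_VICT(0x1470) = (0x1470 & 0x1f0) >> 4 = 7, i.e. TLB entries
 * 0-4 are locked down and entry 7 is the next replacement victim.
 */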

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

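/*
 * pm_runtime_get_sync() returns 1 when the device was already powered
 * up, so only negative values are errors; on failure the usage count
 * is dropped again with pm_runtime_put_noidle() to keep it balanced.
 */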
static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
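
/*
 * Note on the packing in get_iopte_attr(): the mixed-region, endian
 * and element-size bits are assembled in the low bits first, then
 * shifted left by 6 for 1M sections and 16M supersections, whose
 * descriptors carry these attributes at a higher bit position than
 * the 4K small and 64K large page descriptors do.
 */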

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

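/*
 * Load a CAM/RAM pair into the TLB: program the tag (with the valid
 * bit forced on) and the attributes, flush any stale entry matching
 * that tag, then latch the new entry into the slot selected by the
 * current victim index in MMU_LOCK.
 */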
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
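
/*
 * A sketch of the caller side (see for_each_iotlb_cr() in
 * omap-iommu.h for the exact definition): the iteration helper walks
 * every victim index and reads each entry back, roughly
 *
 *	for (i = 0; i < obj->nr_tlb_entries; i++) {
 *		struct cr_regs cr = __iotlb_read_cr(obj, i);
 *		...
 *	}
 */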

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear the iommu tlb entry which contains the address 'da'.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
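/*
 * The page tables live in ordinary cacheable memory but are walked by
 * the IOMMU hardware, so every CPU-side update has to be pushed out
 * with a streaming DMA sync (DMA_TO_DEVICE) before it becomes visible
 * to the device.
 */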
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed ioptes must be clean, ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on the DMA address and the physical address
		 * being the same when mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}
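
/*
 * A 16M supersection occupies sixteen consecutive 1M slots in the
 * first-level table, hence the 16-fold replication above;
 * iopte_alloc_large() below does the same for 64K large pages in the
 * second-level table.
 */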

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
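
/*
 * Illustrative call sequence (hypothetical values): to map a 1M
 * section, a caller fills a struct iotlb_entry with 1M-aligned e->da
 * and e->pa and e->pgsz = MMU_CAM_PGSZ_1M, then calls
 * omap_iopgtable_store_entry(), which dispatches through
 * iopgtable_store_entry_core() to iopgd_alloc_section().
 */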

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * walk the table to check whether it is still needed
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) iopte = iopte_offset(iopgd, da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * omap_iommu_attach() - attach iommu device to an iommu domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * @obj: target omap iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * @iopgd: page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) spin_lock(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (dma_mapping_error(obj->dev, obj->pd_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) dev_err(obj->dev, "DMA map error for L1 table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) obj->iopgd = iopgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) err = iommu_enable(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) flush_iotlb_all(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) spin_unlock(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) spin_unlock(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
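
/*
 * A note on the streaming DMA mapping above: the CPU-built L1 table is
 * handed to the MMU via a DMA_TO_DEVICE mapping, so every descriptor
 * update must be synced to the device explicitly. A minimal sketch of
 * what a flush helper such as flush_iopte_range() is assumed to look
 * like, under that reading:
 *
 *      static void flush_iopte_range(struct device *dev, dma_addr_t dma,
 *                                    unsigned long offset, int num_entries)
 *      {
 *              size_t size = num_entries * sizeof(u32);
 *
 *              dma_sync_single_range_for_device(dev, dma, offset, size,
 *                                               DMA_TO_DEVICE);
 *      }
 */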
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * omap_iommu_detach - release iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @obj: target iommu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static void omap_iommu_detach(struct omap_iommu *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!obj || IS_ERR(obj))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) spin_lock(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) obj->pd_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) obj->iopgd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) iommu_disable(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) spin_unlock(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct iotlb_lock lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct cr_regs cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct cr_regs *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* check if there are any locked tlbs to save */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) iotlb_lock_get(obj, &lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) obj->num_cr_ctx = lock.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (!obj->num_cr_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) tmp = obj->cr_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) *tmp++ = cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct iotlb_lock l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct cr_regs *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* no locked tlbs to restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!obj->num_cr_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) l.base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tmp = obj->cr_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) l.vict = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) iotlb_lock_set(obj, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) iotlb_load_cr(obj, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) l.base = obj->num_cr_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) l.vict = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) iotlb_lock_set(obj, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
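
/*
 * Worked example of the two helpers above, as understood from the lock
 * register layout: entries below lock.base are locked (never chosen as
 * victims) and lock.vict selects the entry that a CAM/RAM load targets.
 * Restoring two saved entries therefore issues:
 *
 *      base = 0, vict = 0;  iotlb_load_cr() for saved entry 0
 *      base = 0, vict = 1;  iotlb_load_cr() for saved entry 1
 *      base = 2, vict = 2;  final iotlb_lock_set() locks both again
 */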
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * omap_iommu_domain_deactivate - deactivate attached iommu devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @domain: iommu domain attached to the target iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * This API allows the client devices of IOMMU devices to suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * the IOMMUs they control at runtime, after they have idled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * suspended all their activity. System suspend is handled through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * the driver's late PM callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int omap_iommu_domain_deactivate(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (!omap_domain->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) iommu += (omap_domain->num_iommus - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pm_runtime_put_sync(oiommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * omap_iommu_domain_activate - activate attached iommu devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * @domain: iommu domain attached to the target iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * This API allows the client devices of IOMMU devices to resume the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * IOMMUs they control at runtime, before they can resume operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * System Resume will leverage the PM driver late callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) int omap_iommu_domain_activate(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!omap_domain->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) pm_runtime_get_sync(oiommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
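
/*
 * Illustrative pairing of the two exported calls above from a
 * hypothetical client (e.g. a remoteproc driver; rproc->domain is
 * whatever iommu_domain the client attached):
 *
 *      // runtime suspend path, remote processor already halted
 *      ret = omap_iommu_domain_deactivate(rproc->domain);
 *
 *      // runtime resume path, before restarting the processor
 *      ret = omap_iommu_domain_activate(rproc->domain);
 */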
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * omap_iommu_runtime_suspend - disable an iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * @dev: iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * This function performs all that is necessary to disable an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * IOMMU device, either during final detachment from a client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * device, or during system/runtime suspend of the device. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * includes programming all the appropriate IOMMU registers, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * managing the associated omap_hwmod's state and the device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * reset line. This function also saves the context of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * locked TLBs if suspending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct iommu_platform_data *pdata = dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct omap_iommu *obj = to_iommu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* save the TLBs only during suspend, and not for power down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (obj->domain && obj->iopgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) omap_iommu_save_tlb_entries(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) omap2_iommu_disable(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (pdata && pdata->device_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) pdata->device_idle(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (pdata && pdata->assert_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) pdata->assert_reset(pdev, pdata->reset_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (pdata && pdata->set_pwrdm_constraint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
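
/*
 * For context, a sketch of the legacy platform data consumed above;
 * the exact wiring lives in OMAP board/SoC code and is assumed here,
 * but the field names match struct iommu_platform_data:
 *
 *      static struct iommu_platform_data omap4_iommu_pdata = {
 *              .reset_name     = "mmu_cache",
 *              .assert_reset   = omap_device_assert_hardreset,
 *              .deassert_reset = omap_device_deassert_hardreset,
 *              .device_enable  = omap_device_enable,
 *              .device_idle    = omap_device_idle,
 *      };
 */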
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * omap_iommu_runtime_resume - enable an iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * @dev: iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * This function performs all that is necessary to enable an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * IOMMU device, either during initial attachment to a client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * device, or during system/runtime resume of the device. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * includes programming all the appropriate IOMMU registers, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * managing the associated omap_hwmod's state and the device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * reset line. The function also restores any locked TLBs if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * resuming after a suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct iommu_platform_data *pdata = dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct omap_iommu *obj = to_iommu(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (pdata && pdata->set_pwrdm_constraint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (pdata && pdata->deassert_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ret = pdata->deassert_reset(pdev, pdata->reset_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_err(dev, "deassert_reset failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (pdata && pdata->device_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pdata->device_enable(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* restore the TLBs only during resume, and not for power up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (obj->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) omap_iommu_restore_tlb_entries(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ret = omap2_iommu_enable(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * omap_iommu_prepare - prepare() dev_pm_ops implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * @dev: iommu device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * This function performs the necessary checks to determine if the IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * device needs suspending or not. The function checks if the runtime PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * status of the device is suspended, and returns 1 in that case. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * causes the PM core to skip invoking any of the sleep PM callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * (suspend, suspend_late, resume, resume_early, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static int omap_iommu_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (pm_runtime_status_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static bool omap_iommu_can_register(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * restrict IOMMU core registration to only the processor-port MDMA MMUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * on DRA7 DSPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct omap_iommu *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) obj->syscfg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (IS_ERR(obj->syscfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* can fail with -EPROBE_DEFER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ret = PTR_ERR(obj->syscfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) &obj->id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (obj->id != 0 && obj->id != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev_err(&pdev->dev, "invalid IOMMU instance id\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
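
/*
 * For reference, a sketch of the DT node this helper expects (property
 * layout assumed from the parsing above: cell 0 of ti,syscon-mmuconfig
 * is the DSP_SYSTEM syscon phandle, cell 1 is the MMU instance id read
 * into obj->id, which must be 0 or 1):
 *
 *      mmu0_dsp1: mmu@40d01000 {
 *              compatible = "ti,dra7-dsp-iommu";
 *              ...
 *              ti,syscon-mmuconfig = <&dsp1_system 0x0>;
 *      };
 */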
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * OMAP Device MMU(IOMMU) detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static int omap_iommu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct omap_iommu *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct device_node *of = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!of) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) pr_err("%s: only DT-based devices are supported\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * self-manage the ordering dependencies between omap_device_enable/idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * and the omap_device_assert/deassert_hardreset APIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (pdev->dev.pm_domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) pdev->dev.pm_domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) obj->name = dev_name(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) obj->nr_tlb_entries = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (err && err != -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) obj->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) obj->ctx = (void *)obj + sizeof(*obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) obj->cr_ctx = devm_kzalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (!obj->cr_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) spin_lock_init(&obj->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) spin_lock_init(&obj->page_table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) obj->regbase = devm_ioremap_resource(obj->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (IS_ERR(obj->regbase))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return PTR_ERR(obj->regbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) dev_name(obj->dev), obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) platform_set_drvdata(pdev, obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (omap_iommu_can_register(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) obj->group = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (IS_ERR(obj->group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return PTR_ERR(obj->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) goto out_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) err = iommu_device_register(&obj->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto out_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) pm_runtime_enable(obj->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) omap_iommu_debugfs_add(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) dev_info(&pdev->dev, "%s registered\n", obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* Re-probe the bus to probe the client devices attached to this IOMMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) bus_iommu_probe(&platform_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) out_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) iommu_device_sysfs_remove(&obj->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) out_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) iommu_group_put(obj->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int omap_iommu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct omap_iommu *obj = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (obj->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) iommu_group_put(obj->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) obj->group = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) iommu_device_sysfs_remove(&obj->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) iommu_device_unregister(&obj->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) omap_iommu_debugfs_remove(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) pm_runtime_disable(obj->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dev_info(&pdev->dev, "%s removed\n", obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static const struct dev_pm_ops omap_iommu_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) .prepare = omap_iommu_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) omap_iommu_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static const struct of_device_id omap_iommu_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) { .compatible = "ti,omap2-iommu" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) { .compatible = "ti,omap4-iommu" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) { .compatible = "ti,dra7-iommu" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) { .compatible = "ti,dra7-dsp-iommu" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) static struct platform_driver omap_iommu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) .probe = omap_iommu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) .remove = omap_iommu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) .name = "omap-iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) .pm = &omap_iommu_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) .of_match_table = of_match_ptr(omap_iommu_of_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) memset(e, 0, sizeof(*e));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) e->da = da;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) e->pa = pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) e->valid = MMU_CAM_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) e->pgsz = pgsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) e->endian = MMU_RAM_ENDIAN_LITTLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) e->elsz = MMU_RAM_ELSZ_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) e->mixed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return iopgsz_to_bytes(e->pgsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct device *dev = omap_domain->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct iotlb_entry e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int omap_pgsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) omap_pgsz = bytes_to_iopgsz(bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (omap_pgsz < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) dev_err(dev, "invalid size to map: %zu\n", bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) iotlb_init_entry(&e, da, pa, omap_pgsz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) ret = omap_iopgtable_store_entry(oiommu, &e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) iommu--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) iopgtable_clear_entry(oiommu, da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
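
/*
 * Minimal usage sketch from a client's perspective: mappings reach
 * omap_iommu_map() through the generic IOMMU API, and the size must
 * be one of the supported OMAP page sizes or the bytes_to_iopgsz()
 * check above rejects it:
 *
 *      // map one 1 MiB section, device address da -> physical pa
 *      ret = iommu_map(domain, da, pa, SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *
 * (newer kernels add a trailing gfp_t argument to iommu_map())
 */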
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct device *dev = omap_domain->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) bool error = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) size_t bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) bytes = iopgtable_clear_entry(oiommu, da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) error = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * simplified return - we only check whether any of the iommus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * reported an error, not whether all of them unmapped the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * number of bytes. A mismatch should not occur, since all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * iommus are programmed identically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return error ? 0 : bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) static int omap_iommu_count(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) while (arch_data->iommu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) arch_data++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* caller should call cleanup if this function fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int omap_iommu_attach_init(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct omap_iommu_domain *odomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) odomain->num_iommus = omap_iommu_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (!odomain->num_iommus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (!odomain->iommus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) iommu = odomain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) for (i = 0; i < odomain->num_iommus; i++, iommu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (!iommu->pgtable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * should never fail, but please keep this around to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * we keep the hardware happy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) IOPGD_TABLE_SIZE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
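
/*
 * Why the WARN_ON() alignment check above is expected to hold: the MMU
 * requires the L1 table to be aligned to its own size, and kzalloc()
 * returns naturally aligned memory for power-of-two sizes on modern
 * kernels (IOPGD_TABLE_SIZE is assumed to be 16 KiB here, i.e. 4096
 * u32 entries). A compile-time guard could make that assumption
 * explicit:
 *
 *      BUILD_BUG_ON(!is_power_of_2(IOPGD_TABLE_SIZE));
 */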
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct omap_iommu_device *iommu = odomain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) kfree(iommu->pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) kfree(odomain->iommus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) odomain->num_iommus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) odomain->iommus = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct omap_iommu_device *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (!arch_data || !arch_data->iommu_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) dev_err(dev, "device doesn't have an associated iommu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) spin_lock(&omap_domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* only a single client device can be attached to a domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (omap_domain->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) dev_err(dev, "iommu domain is already attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) ret = omap_iommu_attach_init(dev, omap_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) dev_err(dev, "failed to allocate required iommu data %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto init_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* configure and enable the omap iommu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) oiommu = arch_data->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ret = omap_iommu_attach(oiommu, iommu->pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_err(dev, "can't get omap iommu: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) goto attach_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) oiommu->domain = domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) iommu->iommu_dev = oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) omap_domain->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) attach_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) while (i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) iommu--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) arch_data--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) omap_iommu_detach(oiommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) iommu->iommu_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) oiommu->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) init_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) omap_iommu_detach_fini(omap_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) spin_unlock(&omap_domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct omap_iommu_device *iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct omap_iommu *oiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (!omap_domain->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dev_err(dev, "domain has no attached device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* only a single device is supported per domain for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (omap_domain->dev != dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) dev_err(dev, "invalid attached device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * cleanup in the reverse order of attachment - this addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * any h/w dependencies between multiple instances, if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) iommu += (omap_domain->num_iommus - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) arch_data += (omap_domain->num_iommus - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) iopgtable_clear_entry_all(oiommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) omap_iommu_detach(oiommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) iommu->iommu_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) oiommu->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) omap_iommu_detach_fini(omap_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) omap_domain->dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static void omap_iommu_detach_dev(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) spin_lock(&omap_domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) _omap_iommu_detach_dev(omap_domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) spin_unlock(&omap_domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct omap_iommu_domain *omap_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (type != IOMMU_DOMAIN_UNMANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (!omap_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) spin_lock_init(&omap_domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) omap_domain->domain.geometry.aperture_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) omap_domain->domain.geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return &omap_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
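
/*
 * Typical client flow for this UNMANAGED-only allocator, using the
 * generic IOMMU API of this kernel era (illustrative sketch):
 *
 *      domain = iommu_domain_alloc(&platform_bus_type);
 *      if (!domain)
 *              return -ENOMEM;
 *
 *      ret = iommu_attach_device(domain, dev);  // omap_iommu_attach_dev()
 */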
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void omap_iommu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * Is an iommu device still attached?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * (currently, only one device can be attached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (omap_domain->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) kfree(omap_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) dma_addr_t da)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct omap_iommu_device *iommu = omap_domain->iommus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct omap_iommu *oiommu = iommu->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct device *dev = oiommu->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) u32 *pgd, *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) phys_addr_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * all the iommus within the domain will have identical programming,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * so perform the lookup using just the first iommu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (iopte_is_small(*pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) else if (iopte_is_large(*pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dev_err(dev, "bogus pte 0x%x, da 0x%llx\n", *pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) (unsigned long long)da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (iopgd_is_section(*pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) else if (iopgd_is_super(*pgd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) dev_err(dev, "bogus pgd 0x%x, da 0x%llx\n", *pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) (unsigned long long)da);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
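
/*
 * Illustrative lookup through the generic API; as noted above, any of
 * the mirrored iommus would give the same answer:
 *
 *      phys_addr_t pa = iommu_iova_to_phys(domain, da);
 *      if (!pa)
 *              dev_warn(dev, "da 0x%llx not mapped\n", (u64)da);
 */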

static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * Retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP.
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	/* allocate one extra zeroed entry to terminate the array */
	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * Use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained across all the IOMMUs.
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}
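/*
 * Illustrative example of the binding parsed above (node names and
 * addresses are made up for the sketch, not taken from a real dts):
 * with #iommu-cells = 0, each "iommus" entry is a bare phandle, so a
 * client behind two MMUs would look roughly like:
 *
 *	mmu0: iommu@40d01000 {
 *		compatible = "ti,dra7-iommu";
 *		#iommu-cells = <0>;
 *	};
 *
 *	dsp {
 *		iommus = <&mmu0>, <&mmu1>;
 *	};
 *
 * of_property_count_elems_of_size(..., sizeof(phandle)) then yields 2,
 * and the loop above fills one arch_data slot per phandle, leaving the
 * extra kcalloc'd slot zeroed as the terminator.
 */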

static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	dev_iommu_priv_set(dev, NULL);
	kfree(arch_data);
}

static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (!arch_data)
		return ERR_PTR(-ENODEV);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);

	return group;
}
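/*
 * Illustrative note, not driver code: every client whose "iommus"
 * phandle resolves to the same MMU instance receives a reference to the
 * same iommu_dev->group here, so the core places those devices in one
 * iommu_group. A consumer could observe this through the generic API:
 *
 *	struct iommu_group *grp = iommu_group_get(client_dev);
 *	...
 *	iommu_group_put(grp);
 *
 * (client_dev is a hypothetical consumer device for the sketch.)
 */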

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.probe_device	= omap_iommu_probe_device,
	.release_device	= omap_iommu_release_device,
	.device_group	= omap_iommu_device_group,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
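/*
 * Hedged usage sketch (illustrative, not part of this driver): client
 * drivers reach these callbacks only through the generic IOMMU API of
 * this kernel generation, roughly:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *	int err = iommu_attach_device(dom, client_dev);
 *	if (!err)
 *		err = iommu_map(dom, da, pa, SZ_1M,
 *				IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_detach_device(dom, client_dev);
 *	iommu_domain_free(dom);
 *
 * client_dev, da and pa are placeholders; the core builds each mapping
 * from the page sizes advertised in OMAP_IOMMU_PGSIZES (4K/64K/1M/16M).
 */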

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
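/*
 * Ordering note: subsys_initcall runs at initcall level 4, ahead of
 * device_initcall (level 6), which is where built-in client drivers
 * such as omap3isp are probed via module_init()/module_platform_driver().
 * Registering the driver and the bus iommu ops here therefore guarantees
 * they are in place before any client attaches; see include/linux/init.h
 * for the level definitions.
 */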