// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */

#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)

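/*
 * A PTE is a single 32-bit word: bit 31 flags the entry as valid and
 * bits 30:12 hold the physical page frame of a 4KiB page, so
 * translations can only target the low 2GiB of physical memory.
 */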
#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)

struct gart_device {
	void __iomem *regs;
	u32 *savedata;
	unsigned long iovmm_base;	/* start of the IOVA aperture */
	unsigned long iovmm_end;	/* end of the IOVA aperture (exclusive) */
	spinlock_t pte_lock;		/* for pagetable */
	spinlock_t dom_lock;		/* for active domain */
	unsigned int active_devices;	/* number of attached devices */
	struct iommu_domain *active_domain;	/* current active domain */
	struct iommu_device iommu;	/* IOMMU core handle */
	struct device *dev;
};

static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)

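/*
 * The page table is not memory-mapped directly: a PTE is reached by
 * writing its IOVA to GART_ENTRY_ADDR and then accessing the entry
 * through GART_ENTRY_DATA. The two-register sequence is not atomic,
 * so runtime callers serialize it with gart->pte_lock (probe, suspend
 * and resume run without the lock, when nothing else can race).
 */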
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

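/*
 * Program every PTE in the aperture, restoring a saved table or, when
 * data is NULL, clearing all entries, then enable address translation
 * via GART_CONFIG. The final read-back flushes the posted writes.
 */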
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}

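/*
 * The GART has a single translation table, so only one domain can be
 * active at a time. Attaching while a different domain is active fails
 * with -EBUSY; the domain is dropped once the last device detaches.
 */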
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}

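/*
 * Only unmanaged domains are supported. The advertised geometry is the
 * single hardware aperture, so the IOMMU core only hands out IOVAs
 * that the GART can actually translate.
 */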
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}

static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}

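/*
 * When the gart_debug module parameter is set, mapping over a live PTE
 * or unmapping an invalid one is rejected. This catches double
 * (un)mappings at the cost of a PTE read-back on every operation.
 */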
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}

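/* None of the optional IOMMU capabilities are implemented. */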
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static void gart_iommu_release_device(struct device *dev)
{
}

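/*
 * There is one GART for the whole system and no per-device translation
 * configuration to parse, so accepting the phandle is all that is
 * required here.
 */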
static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}

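/*
 * Map and unmap update the page table with relaxed writes, so the
 * iotlb_sync callbacks perform the read-back that guarantees the PTE
 * updates have reached the hardware before DMA depends on them.
 */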
static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
				size_t size)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	size_t length = gather->end - gather->start;

	gart_iommu_sync_map(domain, gather->start, length);
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.probe_device	= gart_iommu_probe_device,
	.release_device	= gart_iommu_release_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};

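/*
 * tegra_gart_suspend() and tegra_gart_resume() are meant to be called
 * from the memory controller driver's system sleep hooks. The saved
 * page table lives in gart->savedata, sized in tegra_gart_probe() at
 * one u32 per aperture page.
 */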
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}

struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

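	/*
	 * Mappings are created one CPU page at a time, so this driver
	 * only works when the GART page size matches PAGE_SIZE.
	 */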
	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	err = iommu_device_register(&gart->iommu);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}

module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
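
/*
 * A minimal usage sketch, assuming the Tegra memory controller driver
 * is the caller (tegra_gart_probe() takes the tegra_mc handle, so the
 * call site below is illustrative, not authoritative):
 *
 *	gart = tegra_gart_probe(&pdev->dev, mc);
 *	if (IS_ERR(gart))
 *		return PTR_ERR(gart);
 *
 * tegra_gart_suspend(gart) and tegra_gart_resume(gart) would then be
 * wired into the caller's system sleep callbacks.
 */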