// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

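/*
 * Clock handling: the interface clock ("smmu_pclk") must be running
 * before any IOMMU register is touched; the optional core clock
 * ("iommu_clk") is enabled on top of it. Every __enable_clocks() call
 * is paired with __disable_clocks().
 */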
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

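/*
 * Bring the IOMMU into a known state: clear the global configuration
 * registers, then wipe the translation, TLB and fault state of every
 * context bank.
 */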
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

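/*
 * io-pgtable TLB maintenance hooks. The cookie handed to each callback
 * is the msm_priv of the owning domain, so invalidations are applied
 * to every context bank of every IOMMU attached to that domain.
 */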
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

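/*
 * Invalidate a range of IOVAs one granule at a time, tagging each
 * TLBIVA operation with the ASID programmed into the target context
 * bank.
 */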
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

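/*
 * Allocate a free context bank from the IOMMU's bitmap. The
 * test_and_set_bit() retry loop keeps the allocation safe against
 * concurrent callers.
 */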
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

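/*
 * Point every master ID (MID) of this client at the context bank that
 * was allocated for it, using VMID 0 and a non-secure override.
 */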
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

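/*
 * Program a context bank for ARMv7 short-descriptor translation:
 * enable TEX remap and hardware table walks, load TTBR0/TTBCR and the
 * PRRR/NMRR memory attributes from the io-pgtable configuration,
 * configure faults to stall and raise an interrupt, then set the
 * enable bit.
 */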
static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

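/*
 * Allocate the ARM v7s io-pgtable for a domain. The page sizes
 * advertised in msm_iommu_ops are narrowed to whatever the pgtable
 * code actually accepted.
 */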
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static void msm_iommu_release_device(struct device *dev)
{
}

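/*
 * Attach a domain to a device: for each IOMMU serving the device,
 * allocate a context bank, route the device's MIDs to it, program it
 * with the domain's page table and put the IOMMU on the domain's
 * attached list.
 */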
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	ret = msm_iommu_domain_config(priv);
	if (ret)
		return ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

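/*
 * Resolve an IOVA by asking the hardware: invalidate the context TLB,
 * start a V2P (virtual-to-physical) probe and read the result back
 * from the PAR register, treating supersections separately. Returns 0
 * if the probe faulted.
 */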
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master)
			return;
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 spec->args[0]);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

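/*
 * Threaded context-fault handler: scan every context bank for a
 * pending fault, dump its registers and write back the fault status
 * bits to clear them.
 */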
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier to guarantee
	 * completion of the tlb sync operation is implicitly
	 * taken care of when the iommu client does a writel before
	 * kick starting the other master.
	 */
	.iotlb_sync = NULL,
	.iotlb_sync_map = msm_iommu_sync_map,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

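/*
 * Probe: acquire and prepare the clocks, map the register space, read
 * the number of context banks from the "qcom,ncb" DT property and
 * sanity-check the hardware with a V2P probe of context 0 before
 * registering with the IOMMU core.
 */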
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);
	/* msm_iommu_remove() retrieves the device through drvdata */
	platform_set_drvdata(pdev, iommu);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove = msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);