Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The listing below is the NVIDIA Tegra SMMU (IOMMU) driver, drivers/iommu/tegra-smmu.c, as it appears in this tree.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)

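/*
 * The SMMU uses a two-level page table: a 1024-entry page directory in
 * which each PDE covers a 4 MiB "section" (SMMU_PDE_SHIFT = 22), and
 * 1024-entry page tables whose PTEs each map a 4 KiB page
 * (SMMU_PTE_SHIFT = 12), giving a 4 GiB IOVA aperture in total.
 *
 * Illustrative example (not part of the original driver): the IOVA
 * 0x12345678 decomposes as
 *
 *   iova_pd_index() = (0x12345678 >> 22) & 0x3ff = 0x048
 *   iova_pt_index() = (0x12345678 >> 12) & 0x3ff = 0x345
 *   page offset     =  0x12345678 & 0xfff        = 0x678
 */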
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

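/*
 * The TLB-flush helpers below encode the ASID differently depending on
 * how many ASIDs the SoC provides: with only four ASIDs the field sits
 * at bits 30:29, otherwise the (up to) 7-bit ASID sits at bits 30:24.
 */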
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

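/*
 * Reading back an SMMU register (SMMU_PTB_ASID here) presumably serves
 * to force the preceding posted MMIO writes to complete, so a flush is
 * known to have taken effect once this returns.
 */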
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}

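/*
 * ASIDs are handed out from a simple bitmap (smmu->asids) under
 * smmu->lock; an address space holds its ASID for as long as it is
 * attached to at least one device (see the use_count handling in
 * tegra_smmu_as_prepare()/tegra_smmu_as_unprepare() below).
 */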
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
				swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

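/*
 * Activating an address space programs the hardware in two steps:
 * SMMU_PTB_ASID selects the ASID, then SMMU_PTB_DATA is written with
 * the page-directory bus address and access attributes for that ASID.
 * The page-table cache and TLB are flushed first so that no stale
 * translations survive from a previous user of the same ASID.
 */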
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}

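/*
 * A device may reference this SMMU through several "iommus" phandle
 * entries; attach walks them all, prepares the address space once per
 * match (tegra_smmu_as_prepare() is refcounted) and routes each
 * matching swgroup to the domain's ASID. Detach is the mirror image.
 */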
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

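/*
 * Page tables are allocated lazily: the caller passes in a fresh page,
 * and if no page table exists for this PDE yet, that page is DMA-mapped
 * and installed as the new table; otherwise the existing table's DMA
 * address is recovered from the PDE and the supplied page is simply the
 * one already installed (see as_get_pde_page()).
 */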
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* first check whether an allocation is needed at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate the page in a sleeping context if the GFP flags permit.
	 * Hence the spinlock needs to be unlocked and re-locked around the
	 * allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In the case of a blocking allocation, a concurrent mapping may
	 * have won the race for this PDE. If so, the page allocated here
	 * isn't needed, and a failure of this allocation isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}

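/*
 * Map path: grab (or lazily allocate) the page table covering this
 * IOVA, bump the per-table use count when writing a previously empty
 * PTE, and encode the physical page frame number together with the
 * requested read/write attributes into the PTE.
 */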
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}

static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

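/*
 * The public map/unmap entry points only take as->lock around the
 * workers above; note that __tegra_smmu_map() may temporarily drop
 * that lock for a blocking page-table allocation (see
 * as_get_pde_page()), which is why the flags are passed by reference.
 */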
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}

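/*
 * Illustrative example (hypothetical values, not from the original
 * driver): assuming pfn_mask = 0xfffff, a PTE of 0xe00abcde carries
 * the attribute bits 31:29 (readable, writable, non-secure) and the
 * PFN 0xabcde, so an IOVA with page offset 0x123 translates to the
 * physical address 0xabcde000 + 0x123.
 */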
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct tegra_mc *mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	pdev = of_find_device_by_node(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (!pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	mc = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (!mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	return mc->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
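/*
 * Illustrative device-tree fragment (not from this tree) showing the kind
 * of reference tegra_smmu_find() resolves: the "iommus" phandle points at
 * the memory controller, whose drvdata carries the tegra_smmu instance.
 *
 *	dc@54200000 {
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 */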
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	const struct iommu_ops *ops = smmu->iommu.ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		dev_err(dev, "failed to initialize fwspec: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	err = ops->of_xlate(dev, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		dev_err(dev, "failed to parse SW group ID: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		iommu_fwspec_free(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) }
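/*
 * Once tegra_smmu_configure() succeeds, the swgroup ID from the "iommus"
 * specifier has been stored in the device's iommu_fwspec via ->of_xlate();
 * it is what tegra_smmu_device_group() later reads back as fwspec->ids[0].
 */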
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct tegra_smmu *smmu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	struct of_phandle_args args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	unsigned int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 					  &args) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		smmu = tegra_smmu_find(args.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		if (smmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			err = tegra_smmu_configure(smmu, dev, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			of_node_put(args.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 				return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 			 * Only a single IOMMU master interface is currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			 * supported by the Linux kernel, so abort after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 			 * first match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 			dev_iommu_priv_set(dev, smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		of_node_put(args.np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (!smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return &smmu->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static void tegra_smmu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	dev_iommu_priv_set(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static const struct tegra_smmu_group_soc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	unsigned int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	for (i = 0; i < smmu->soc->num_groups; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			if (smmu->soc->groups[i].swgroups[j] == swgroup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				return &smmu->soc->groups[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) static void tegra_smmu_group_release(void *iommu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct tegra_smmu_group *group = iommu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct tegra_smmu *smmu = group->smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	mutex_lock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	list_del(&group->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	mutex_unlock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 						unsigned int swgroup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	const struct tegra_smmu_group_soc *soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct tegra_smmu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct iommu_group *grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/* Find the group_soc associated with this swgroup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	soc = tegra_smmu_find_group(smmu, swgroup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	mutex_lock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/* Find an existing iommu_group associated with this swgroup or group_soc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	list_for_each_entry(group, &smmu->groups, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			grp = iommu_group_ref_get(group->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			mutex_unlock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			return grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (!group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		mutex_unlock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	INIT_LIST_HEAD(&group->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	group->swgroup = swgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	group->smmu = smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	group->soc = soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	group->group = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (IS_ERR(group->group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		devm_kfree(smmu->dev, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		mutex_unlock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (soc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		iommu_group_set_name(group->group, soc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	list_add_tail(&group->list, &smmu->groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	mutex_unlock(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	return group->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
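/*
 * The tegra_smmu_group allocated above is devm-managed, so it is freed
 * together with the SMMU device; tegra_smmu_group_release() only unlinks
 * it from smmu->groups once the last iommu_group reference is dropped.
 */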
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) static struct iommu_group *tegra_smmu_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	struct iommu_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		group = generic_device_group(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static int tegra_smmu_of_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			       struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	u32 id = args->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	return iommu_fwspec_add_ids(dev, &id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) static const struct iommu_ops tegra_smmu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	.capable = tegra_smmu_capable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	.domain_alloc = tegra_smmu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	.domain_free = tegra_smmu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	.attach_dev = tegra_smmu_attach_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	.detach_dev = tegra_smmu_detach_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	.probe_device = tegra_smmu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	.release_device = tegra_smmu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	.device_group = tegra_smmu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	.map = tegra_smmu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	.unmap = tegra_smmu_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	.iova_to_phys = tegra_smmu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	.of_xlate = tegra_smmu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	.pgsize_bitmap = SZ_4K,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) };
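/*
 * With pgsize_bitmap set to SZ_4K, the IOMMU core splits every map/unmap
 * request into 4 KiB pages before calling the callbacks above, so
 * tegra_smmu_map() and tegra_smmu_unmap() only ever see one page at a time.
 */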
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) static void tegra_smmu_ahb_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	static const struct of_device_id ahb_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		{ .compatible = "nvidia,tegra30-ahb", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct device_node *ahb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	ahb = of_find_matching_node(NULL, ahb_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (ahb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		tegra_ahb_enable_smmu(ahb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		of_node_put(ahb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct tegra_smmu *smmu = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	seq_puts(s, "swgroup    enabled  ASID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	seq_puts(s, "------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	for (i = 0; i < smmu->soc->num_swgroups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		const char *status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		unsigned int asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		value = smmu_readl(smmu, group->reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (value & SMMU_ASID_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			status = "yes";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			status = "no";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		asid = value & SMMU_ASID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			   asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);
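/*
 * Example of the resulting debugfs output (values illustrative):
 *
 *	swgroup    enabled  ASID
 *	------------------------
 *	dc         yes      0x01
 *	afi        no       0x00
 */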
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int tegra_smmu_clients_show(struct seq_file *s, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct tegra_smmu *smmu = s->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	seq_puts(s, "client       enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	seq_puts(s, "--------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	for (i = 0; i < smmu->soc->num_clients; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		const struct tegra_mc_client *client = &smmu->soc->clients[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		const char *status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		value = smmu_readl(smmu, client->smmu.reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		if (value & BIT(client->smmu.bit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			status = "yes";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			status = "no";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		seq_printf(s, "%-12s %s\n", client->name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
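/*
 * Example of the resulting debugfs output (values illustrative):
 *
 *	client       enabled
 *	--------------------
 *	display0a    yes
 *	afir         no
 */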
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	smmu->debugfs = debugfs_create_dir("smmu", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (IS_ERR(smmu->debugfs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	debugfs_create_file("swgroups", 0444, smmu->debugfs, smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			    &tegra_smmu_swgroups_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	debugfs_create_file("clients", 0444, smmu->debugfs, smmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			    &tegra_smmu_clients_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	debugfs_remove_recursive(smmu->debugfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct tegra_smmu *tegra_smmu_probe(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				    const struct tegra_smmu_soc *soc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				    struct tegra_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct tegra_smmu *smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	if (!smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	 * This is a bit of a hack. Ideally we'd want to simply return this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 * value. However the IOMMU registration process will attempt to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 * all devices to the IOMMU when bus_set_iommu() is called. In order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 * not to rely on global variables to track the IOMMU instance, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 * set it here so that it can be looked up from the .probe_device()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	 * callback via the IOMMU device's .drvdata field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	mc->smmu = smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
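	/*
	 * Sizing example (assuming a 64-bit build): for an SoC with 128
	 * ASIDs, BITS_TO_LONGS(128) == 2, so the bitmap below occupies
	 * 16 bytes.
	 */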
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	if (!smmu->asids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	INIT_LIST_HEAD(&smmu->groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	mutex_init(&smmu->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	smmu->regs = mc->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	smmu->soc = soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	smmu->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	smmu->mc = mc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
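	/*
	 * Worked examples for the two masks computed below (illustrative,
	 * assuming SMMU_PTE_SHIFT == 12): with 34 address bits, pfn_mask =
	 * BIT_MASK(22) - 1 = 0x3fffff; with 32 TLB lines, tlb_mask =
	 * (1 << fls(32)) - 1 = 0x3f.
	 */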
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	smmu->pfn_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		mc->soc->num_address_bits, smmu->pfn_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		smmu->tlb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (soc->supports_request_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	smmu_writel(smmu, value, SMMU_PTC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (soc->supports_round_robin_arbitration)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	smmu_writel(smmu, value, SMMU_TLB_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	smmu_flush_ptc_all(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	smmu_flush_tlb(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	smmu_flush(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	tegra_smmu_ahb_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	err = iommu_device_register(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		iommu_device_sysfs_remove(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		iommu_device_unregister(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		iommu_device_sysfs_remove(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (IS_ENABLED(CONFIG_DEBUG_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		tegra_smmu_debugfs_init(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return smmu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
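/*
 * Sketch of the expected caller (paraphrased from the Tegra memory
 * controller driver, not part of this file): the MC probe path passes its
 * register mapping and SoC data in and treats an ERR_PTR() return as
 * "run without SMMU":
 *
 *	mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
 *	if (IS_ERR(mc->smmu))
 *		mc->smmu = NULL;
 */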
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void tegra_smmu_remove(struct tegra_smmu *smmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	iommu_device_unregister(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	iommu_device_sysfs_remove(&smmu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (IS_ENABLED(CONFIG_DEBUG_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		tegra_smmu_debugfs_exit(smmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }