Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) // Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) // Copyright (C) 2019-2020, Cerno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/dma-direction.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/dma-iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/iopoll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/pm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/reset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) 
/* MMIO register offsets and bit definitions for the sun50i IOMMU. */
#define IOMMU_RESET_REG			0x010
#define IOMMU_ENABLE_REG		0x020
#define IOMMU_ENABLE_ENABLE			BIT(0)

#define IOMMU_BYPASS_REG		0x030
#define IOMMU_AUTO_GATING_REG		0x040
#define IOMMU_AUTO_GATING_ENABLE		BIT(0)

#define IOMMU_WBUF_CTRL_REG		0x044
#define IOMMU_OOO_CTRL_REG		0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG	0x04c
#define IOMMU_TTB_REG			0x050
#define IOMMU_TLB_ENABLE_REG		0x060
#define IOMMU_TLB_PREFETCH_REG		0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)	BIT(m)

/*
 * TLB flush: PTW cache, macro TLB, and one micro TLB per master
 * (six masters, bits 5:0). Hardware clears the bits when done.
 */
#define IOMMU_TLB_FLUSH_REG		0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE		BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB		BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)		(BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG		0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG	0x094
#define IOMMU_TLB_IVLD_ENABLE_REG	0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE		BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG		0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG	0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE		BIT(0)

/*
 * Authority-control ("domain") permission registers: two domains per
 * 32-bit register, with a read-disable and a write-disable bit per
 * master in each half.
 */
#define IOMMU_DM_AUT_CTRL_REG(d)	(0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)	(1 << (((d & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)	(1 << (((d & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG		0x0d0
#define IOMMU_INT_ENABLE_REG		0x100
#define IOMMU_INT_CLR_REG		0x104
#define IOMMU_INT_STA_REG		0x108
#define IOMMU_INT_ERR_ADDR_REG(i)	(0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG	0x130
#define IOMMU_INT_ERR_ADDR_L2_REG	0x134
#define IOMMU_INT_ERR_DATA_REG(i)	(0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG		0x0180
#define IOMMU_L2PG_INT_REG		0x0184

/* Interrupt sources: invalid L1/L2 page-table walks and per-master
 * permission faults (one bit per master, six masters). */
#define IOMMU_INT_INVALID_L2PG			BIT(17)
#define IOMMU_INT_INVALID_L1PG			BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)		BIT(m)
#define IOMMU_INT_MASTER_MASK			(IOMMU_INT_MASTER_PERMISSION(0) | \
						 IOMMU_INT_MASTER_PERMISSION(1) | \
						 IOMMU_INT_MASTER_PERMISSION(2) | \
						 IOMMU_INT_MASTER_PERMISSION(3) | \
						 IOMMU_INT_MASTER_PERMISSION(4) | \
						 IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK				(IOMMU_INT_INVALID_L1PG | \
						 IOMMU_INT_INVALID_L2PG | \
						 IOMMU_INT_MASTER_MASK)

/* Page-table geometry: 4-byte entries, 4096-entry L1 Directory Table,
 * 256-entry L2 Page Tables. */
#define PT_ENTRY_SIZE			sizeof(u32)

#define NUM_DT_ENTRIES			4096
#define DT_SIZE				(NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES			256
#define PT_SIZE				(NUM_PT_ENTRIES * PT_ENTRY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
/* Driver state for one sun50i IOMMU hardware instance. */
struct sun50i_iommu {
	struct iommu_device iommu;	/* handle registered with the IOMMU core */

	/* Lock to modify the IOMMU registers */
	spinlock_t iommu_lock;

	struct device *dev;		/* backing platform device */
	void __iomem *base;		/* MMIO register window */
	struct reset_control *reset;	/* reset line (deasserted on enable) */
	struct clk *clk;		/* module clock (enabled on enable) */

	struct iommu_domain *domain;	/* currently attached domain, NULL if none */
	struct iommu_group *group;	/* iommu_group handed out to client devices */
	struct kmem_cache *pt_pool;	/* slab cache — presumably backs L2 page tables; see PT_SIZE */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 
/* One IOMMU address space, backed by a single L1 Directory Table. */
struct sun50i_iommu_domain {
	struct iommu_domain domain;	/* generic domain embedded for container_of() */

	/* Number of devices attached to the domain */
	refcount_t refcnt;

	/* L1 Page Table */
	u32 *dt;
	dma_addr_t dt_dma;	/* DMA address of dt, written to IOMMU_TTB_REG */

	struct sun50i_iommu *iommu;	/* hardware instance this domain is bound to */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 	return container_of(domain, struct sun50i_iommu_domain, domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
/* Fetch the IOMMU instance stashed in @dev's per-device IOMMU data. */
static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	return iommu;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	return readl(iommu->base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	writel(value, iommu->base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146)  * The Allwinner H6 IOMMU uses a 2-level page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148)  * The first level is the usual Directory Table (DT), that consists of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149)  * 4096 4-bytes Directory Table Entries (DTE), each pointing to a Page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150)  * Table (PT).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151)  *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153)  * pointing to a 4kB page of physical memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155)  * The IOMMU supports a single DT, pointed by the IOMMU_TTB_REG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156)  * register that contains its physical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
/* IOVA layout: [31:20] DTE index, [19:12] PTE index, [11:0] page offset. */
#define SUN50I_IOVA_DTE_MASK	GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK	GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK	GENMASK(11, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  * Each Directory Table Entry has a Page Table address and a valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  * bit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  * +---------------------+-----------+-+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  * | PT address          | Reserved  |V|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  * +---------------------+-----------+-+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  *  31:10 - Page Table address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  *   9:2  - Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  *   1:0  - 1 if the entry is valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
/* DTE field masks — see the layout diagram above. */
#define SUN50I_DTE_PT_ADDRESS_MASK	GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS		GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) static bool sun50i_dte_is_pt_valid(u32 dte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) static u32 sun50i_mk_dte(dma_addr_t pt_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210)  * Each PTE has a Page address, an authority index and a valid bit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212)  * +----------------+-----+-----+-----+---+-----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213)  * | Page address   | Rsv | ACI | Rsv | V | Rsv |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214)  * +----------------+-----+-----+-----+---+-----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  *  31:12 - Page address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  *  11:8  - Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  *   7:4  - Authority Control Index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  *   3:2  - Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *     1  - 1 if the entry is valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  *     0  - Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * The way permissions work is that the IOMMU has 16 "domains" that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * can be configured to give each masters either read or write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * permissions through the IOMMU_DM_AUT_CTRL_REG registers. The domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  * 0 seems like the default domain, and its permissions in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * useful to enforce any particular permission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * Each page entry will then have a reference to the domain they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  * affected to, so that we can actually enforce them on a per-page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  * basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  * In order to make it work with the IOMMU framework, we will be using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234)  * 4 different domains, starting at 1: RD_WR, RD, WR and NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235)  * depending on the permission we want to enforce. Each domain will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236)  * have each master setup in the same way, since the IOMMU framework
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237)  * doesn't seem to restrict page access on a per-device basis. And
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238)  * then we will use the relevant domain index when generating the page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239)  * table entry depending on the permissions we want to be enforced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
/*
 * Authority Control Index values stored in each PTE. Index 0 is the
 * hardware default domain and is deliberately left unused (see the
 * comment above on why domain 0 cannot enforce permissions).
 */
enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,		/* no access */
	SUN50I_IOMMU_ACI_RD,		/* read-only */
	SUN50I_IOMMU_ACI_WR,		/* write-only */
	SUN50I_IOMMU_ACI_RD_WR,		/* read + write */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
/* PTE field masks — see the layout diagram above. */
#define SUN50I_PTE_PAGE_ADDRESS_MASK	GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK		GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID		BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) static phys_addr_t sun50i_pte_get_page_address(u32 pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) static bool sun50i_pte_is_page_valid(u32 pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	return pte & SUN50I_PTE_PAGE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) static u32 sun50i_mk_pte(phys_addr_t page, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	enum sun50i_iommu_aci aci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	if (prot & (IOMMU_READ | IOMMU_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		aci = SUN50I_IOMMU_ACI_RD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	else if (prot & IOMMU_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 		aci = SUN50I_IOMMU_ACI_RD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	else if (prot & IOMMU_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 		aci = SUN50I_IOMMU_ACI_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		aci = SUN50I_IOMMU_ACI_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	return page | flags | SUN50I_PTE_PAGE_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
/*
 * Flush @count page-table entries starting at @vaddr out to memory so
 * the hardware page-table walker observes them (the tables are synced
 * DMA_TO_DEVICE rather than allocated coherent).
 */
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	/* NOTE(review): uses virt_to_phys() as the DMA address — assumes a
	 * direct (1:1) DMA mapping for table pages on this platform; verify. */
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
/*
 * Invalidate every cached translation: the page-table-walk cache, the
 * macro TLB, and all six per-master micro TLBs.
 *
 * Caller must hold iommu->iommu_lock. The hardware clears the flush
 * bits once the invalidation completes; we poll for that for up to
 * 2 ms. Returns 0 on success or a negative errno on timeout.
 */
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	/* Wait (1 us poll interval) for the hardware to ack the flush. */
	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
/* .flush_iotlb_all callback: drop every cached translation for @domain. */
static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * At boot, we'll have a first call into .flush_iotlb_all right after
	 * .probe_device, and since we link our (single) domain to our iommu in
	 * the .attach_device callback, we don't have that pointer set.
	 *
	 * It shouldn't really be any trouble to ignore it though since we flush
	 * all caches as part of the device powerup.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
/*
 * .iotlb_sync callback: this driver does not queue invalidations, so a
 * sync is implemented as a full TLB flush; @gather is ignored.
 */
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 
/*
 * Power up and program the IOMMU: deassert reset, enable the clock,
 * point the hardware at the attached domain's Directory Table, program
 * the authority-control permission domains, flush stale TLB state and
 * finally set the enable bit.
 *
 * Returns 0 on success (or when no domain is attached yet) or a
 * negative errno; on failure the clock and reset line are rolled back.
 */
static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	/* Nothing to program until a domain has been attached. */
	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	/* Physical address of the L1 Directory Table. */
	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	/* Enable TLB prefetch for all six masters. */
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	/* Unmask page-fault and per-master permission interrupts. */
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	/* ACI_NONE: deny both reads and writes for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	/* ACI_RD: read-only — writes disallowed for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	/* ACI_WR: write-only — reads disallowed for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	/* Start from a clean TLB before enabling translation. */
	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
/*
 * Power the IOMMU down: turn translation off under the hardware lock,
 * then ungate/unclock the block.  Counterpart of sun50i_iommu_enable().
 */
static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	/* Disable translation and drop the L1 directory base address. */
	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	/* Undo the clock/reset setup done at enable time. */
	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 					   gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	dma_addr_t pt_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	u32 *page_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (!page_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	if (dma_mapping_error(iommu->dev, pt_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		kmem_cache_free(iommu->pt_pool, page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	/* We rely on the physical address and DMA address being the same */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	WARN_ON(pt_dma != virt_to_phys(page_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	return page_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 					 u32 *page_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	phys_addr_t pt_phys = virt_to_phys(page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	kmem_cache_free(iommu->pt_pool, page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
/*
 * Look up, or lazily allocate, the L2 page table backing @iova.
 *
 * Returns the CPU pointer to the L2 table, or an ERR_PTR propagated
 * from sun50i_iommu_alloc_page_table() on allocation failure.
 */
static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		/* Fast path: an L2 table is already installed for this DTE. */
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	/*
	 * Publish the new table with cmpxchg so that a concurrent mapper
	 * racing on the same DTE cannot install two different tables.
	 */
	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		/*
		 * We lost the race: keep the winner's table and free the
		 * one we just allocated.
		 */
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	/* Make both the L2 table and the updated DTE visible to the device. */
	sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	struct sun50i_iommu *iommu = sun50i_domain->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	u32 pte_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	u32 *page_table, *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (IS_ERR(page_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		ret = PTR_ERR(page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	pte_index = sun50i_iova_get_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	pte_addr = &page_table[pte_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		dev_err(iommu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			&iova, &page_phys, &paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	*pte_addr = sun50i_mk_pte(paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	sun50i_table_flush(sun50i_domain, pte_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 				 size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	u32 *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	u32 dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	if (!sun50i_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	pt_phys = sun50i_dte_get_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	if (!sun50i_pte_is_page_valid(*pte_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	memset(pte_addr, 0, sizeof(*pte_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	sun50i_table_flush(sun50i_domain, pte_addr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	return SZ_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 					     dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	u32 *page_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	u32 dte, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	if (!sun50i_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	pt_phys = sun50i_dte_get_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	page_table = (u32 *)phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	pte = page_table[sun50i_iova_get_pte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (!sun50i_pte_is_page_valid(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	return sun50i_pte_get_page_address(pte) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		sun50i_iova_get_page_offset(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	struct sun50i_iommu_domain *sun50i_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (type != IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	    type != IOMMU_DOMAIN_IDENTITY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	    type != IOMMU_DOMAIN_UNMANAGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	if (!sun50i_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	if (type == IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	    iommu_get_dma_cookie(&sun50i_domain->domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		goto err_free_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 						    get_order(DT_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (!sun50i_domain->dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		goto err_put_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	refcount_set(&sun50i_domain->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	sun50i_domain->domain.geometry.aperture_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	sun50i_domain->domain.geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	return &sun50i_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) err_put_cookie:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	if (type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		iommu_put_dma_cookie(&sun50i_domain->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) err_free_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	kfree(sun50i_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
/*
 * Release a domain allocated by sun50i_iommu_domain_alloc(): free the
 * L1 directory pages, drop the DMA cookie and free the domain itself.
 */
static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	/* Safe to call unconditionally; it is a no-op for non-DMA domains. */
	iommu_put_dma_cookie(domain);

	kfree(sun50i_domain);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 				      struct sun50i_iommu_domain *sun50i_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	iommu->domain = &sun50i_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	sun50i_domain->iommu = iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 					       DT_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	return sun50i_iommu_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
/*
 * Tear down the hardware attachment of @sun50i_domain: free every L2
 * page table referenced by the L1 directory, disable the IOMMU and
 * unmap the directory itself.  Called when the last device detaches.
 */
static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	/* Walk the L1 directory and release every valid L2 table. */
	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		/* Clear and flush the DTE before freeing the table it names. */
		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}


	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) static void sun50i_iommu_detach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 				       struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	dev_dbg(dev, "Detaching from IOMMU domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (iommu->domain != domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (refcount_dec_and_test(&sun50i_domain->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		sun50i_iommu_detach_domain(iommu, sun50i_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) static int sun50i_iommu_attach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 				      struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	struct sun50i_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	iommu = sun50i_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	dev_dbg(dev, "Attaching to IOMMU domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	refcount_inc(&sun50i_domain->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (iommu->domain == domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		sun50i_iommu_detach_device(iommu->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	sun50i_iommu_attach_domain(iommu, sun50i_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct sun50i_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	iommu = sun50i_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	return &iommu->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
/* No per-device state to tear down. */
static void sun50i_iommu_release_device(struct device *dev) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
/* All masters behind this IOMMU share the single group in iommu->group. */
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) static int sun50i_iommu_of_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 				 struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	unsigned id = args->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	return iommu_fwspec_add_ids(dev, &id, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
/*
 * IOMMU core callbacks.  pgsize_bitmap advertises 4K pages only,
 * matching the single-page granularity of the map/unmap paths above.
 */
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap	= SZ_4K,
	.attach_dev	= sun50i_iommu_attach_device,
	.detach_dev	= sun50i_iommu_detach_device,
	.device_group	= sun50i_iommu_device_group,
	.domain_alloc	= sun50i_iommu_domain_alloc,
	.domain_free	= sun50i_iommu_domain_free,
	.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
	.iotlb_sync	= sun50i_iommu_iotlb_sync,
	.iova_to_phys	= sun50i_iommu_iova_to_phys,
	.map		= sun50i_iommu_map,
	.of_xlate	= sun50i_iommu_of_xlate,
	.probe_device	= sun50i_iommu_probe_device,
	.release_device	= sun50i_iommu_release_device,
	.unmap		= sun50i_iommu_unmap,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 				      unsigned master, phys_addr_t iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 				      unsigned prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
/*
 * Handle a page-table (translation) fault interrupt.
 *
 * Reads the faulting IOVA from @addr_reg and the blaming master from
 * @blame_reg, reports the fault and returns the IOVA so the caller
 * can acknowledge it.  Must be called with iommu_lock held.
 */
static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * If the address is not in the page table, we can't get what
	 * operation triggered the fault. Assume it's a read
	 * operation.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
/*
 * Handle a permission (access-control) fault interrupt.
 *
 * Reads the blaming master, the faulting IOVA and the ACI (access
 * control index) of the offending PTE, infers the most likely access
 * direction from the ACI, reports the fault and returns the IOVA so
 * the caller can acknowledge it.  Must be called with iommu_lock held.
 */
static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
		/*
		 * If we are in the read-only domain, then it means we
		 * tried to write.
		 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

		/*
		 * If we are in the write-only domain, then it means
		 * we tried to read.
		 */
	case SUN50I_IOMMU_ACI_WR:
		/* fallthrough */

		/*
		 * If we are in the domain without any permission, we
		 * can't really tell. Let's default to a read
		 * operation.
		 */
	case SUN50I_IOMMU_ACI_NONE:
		/* fallthrough */

		/* WTF? */
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	/* Report the fault with the direction inferred above. */
	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	struct sun50i_iommu *iommu = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	spin_lock(&iommu->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	status = iommu_read(iommu, IOMMU_INT_STA_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (!(status & IOMMU_INT_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		spin_unlock(&iommu->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (status & IOMMU_INT_INVALID_L2PG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		sun50i_iommu_handle_pt_irq(iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 					    IOMMU_INT_ERR_ADDR_L2_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 					    IOMMU_L2PG_INT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	else if (status & IOMMU_INT_INVALID_L1PG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		sun50i_iommu_handle_pt_irq(iommu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 					   IOMMU_INT_ERR_ADDR_L1_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 					   IOMMU_L1PG_INT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		sun50i_iommu_handle_perm_irq(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	iommu_write(iommu, IOMMU_INT_CLR_REG, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	iommu_write(iommu, IOMMU_RESET_REG, ~status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	iommu_write(iommu, IOMMU_RESET_REG, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	spin_unlock(&iommu->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static int sun50i_iommu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	struct sun50i_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	int ret, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	spin_lock_init(&iommu->iommu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	platform_set_drvdata(pdev, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	iommu->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 					   PT_SIZE, PT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 					   SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 					   NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	if (!iommu->pt_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	iommu->group = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (IS_ERR(iommu->group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		ret = PTR_ERR(iommu->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		goto err_free_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	iommu->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (IS_ERR(iommu->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		ret = PTR_ERR(iommu->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		goto err_free_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		ret = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		goto err_free_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	iommu->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	if (IS_ERR(iommu->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		dev_err(&pdev->dev, "Couldn't get our clock.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		ret = PTR_ERR(iommu->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		goto err_free_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (IS_ERR(iommu->reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		ret = PTR_ERR(iommu->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		goto err_free_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 				     NULL, dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		goto err_free_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	ret = iommu_device_register(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		goto err_remove_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			       dev_name(&pdev->dev), iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		goto err_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) err_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	iommu_device_unregister(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) err_remove_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	iommu_device_sysfs_remove(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) err_free_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	iommu_group_put(iommu->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) err_free_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	kmem_cache_destroy(iommu->pt_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static const struct of_device_id sun50i_iommu_dt[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	{ .compatible = "allwinner,sun50i-h6-iommu", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	{ /* sentinel */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static struct platform_driver sun50i_iommu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		.name			= "sun50i-iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		.of_match_table 	= sun50i_iommu_dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		.suppress_bind_attrs	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) MODULE_LICENSE("Dual BSD/GPL");