// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR 0x000
#define MMU_PT_ADDR_MASK GENMASK(31, 7)

#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
#define F_MMU_INV_RANGE 0x1

#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028

#define REG_MMU_INV_SEL_GEN2 0x02c
#define REG_MMU_INV_SEL_GEN1 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)

#define REG_MMU_MISC_CTRL 0x048
#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS 0x050
#define REG_MMU_WR_LEN_CTRL 0x054
#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG 0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5)

#define REG_MMU_IVRP_PADDR 0x114

#define REG_MMU_VLD_PA_RNG 0x118
#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0 0x120
#define F_L2_MULTI_HIT_EN BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
#define F_PREFETCH_FIFO_OVERFLOW_INT_EN BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
#define F_MISS_FIFO_ERR_INT_EN BIT(6)
#define F_INT_CLR_BIT BIT(12)

#define REG_MMU_INT_MAIN_CONTROL 0x124
/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12))
#define F_INT_PREFETCH_TRANSACTION_FIFO_FAULT (BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE 0x12C

#define REG_MMU_FAULT_ST1 0x134
#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)

#define REG_MMU0_FAULT_VA 0x13c
#define F_MMU_INVAL_VA_31_12_MASK GENMASK(31, 12)
#define F_MMU_INVAL_VA_34_32_MASK GENMASK(11, 9)
#define F_MMU_INVAL_PA_34_32_MASK GENMASK(8, 6)
#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)

#define REG_MMU0_INVLD_PA 0x140
#define REG_MMU1_FAULT_VA 0x144
#define REG_MMU1_INVLD_PA 0x148
#define REG_MMU0_INT_ID 0x150
#define REG_MMU1_INT_ID 0x154
#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
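/*
 * A hypothetical worked example of the INT_ID decode above: for a fault
 * ID register value of 0x2a4 on HW without HAS_SUB_COMM,
 * port = (0x2a4 >> 2) & 0x1f = 9 and larb = (0x2a4 >> 7) & 0x7 = 5.
 */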

#define MTK_PROTECT_PA_ALIGN 256

#define HAS_4GB_MODE BIT(0)
/* HW will use the EMI clock if there is no "bclk". */
#define HAS_BCLK BIT(1)
#define HAS_VLD_PA_RNG BIT(2)
#define RESET_AXI BIT(3)
#define OUT_ORDER_WR_EN BIT(4)
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)
#define HAS_LEGACY_IVRP_PADDR BIT(7)
#define IOVA_34_EN BIT(8)

#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
		((((pdata)->flags) & (_x)) == (_x))
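/*
 * Note the mask compare above: all bits of _x must be set, so a combined
 * query such as MTK_IOMMU_HAS_FLAG(pdata, HAS_BCLK | RESET_AXI) is true
 * only when the platform has both capabilities.
 */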

struct mtk_iommu_domain {
        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;

        struct mtk_iommu_data *data;
        struct iommu_domain domain;
};

static const struct iommu_ops mtk_iommu_ops;

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data);

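/*
 * The invalidate range registers are 32 bits wide, so for a 34-bit IOVA
 * the macro below folds bits [33:32] into the low bits of the register
 * value; e.g. (hypothetical) iova 0x2_4000_1000 is written as 0x4000_1002.
 */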
#define MTK_IOMMU_TLB_ADDR(iova) ({ \
        dma_addr_t _addr = iova; \
        ((lower_32_bits(_addr) & GENMASK(31, 12)) | upper_32_bits(_addr));\
})

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0       1G      2G      3G      4G      5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 * 4G      5G      6G      7G      8G
 * |---E---|---B---|---C---|---D---|
 * +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by M4U; for regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, and for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
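/*
 * A worked example of the remap above: in 4GB mode a CPU physical address
 * of 0x4000_0000 (region 'B') is programmed with bit 32 set, so the M4U
 * outputs 0x1_4000_0000, while 0x1_0000_0000 (region 'E') stays unchanged.
 */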

static LIST_HEAD(m4ulist); /* List all the M4U HWs */

#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)

struct mtk_iommu_iova_region {
        dma_addr_t iova_base;
        unsigned long long size;
};

static const struct mtk_iommu_iova_region single_domain[] = {
        {.iova_base = 0, .size = SZ_4G},
};

static const struct mtk_iommu_iova_region mt8192_multi_dom[] = {
        { .iova_base = 0x0, .size = SZ_4G},                /* disp: 0 ~ 4G */
#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
        { .iova_base = SZ_4G, .size = SZ_4G},              /* vdec: 4G ~ 8G */
        { .iova_base = SZ_4G * 2, .size = SZ_4G},          /* CAM/MDP: 8G ~ 12G */
        { .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */
        { .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */
#endif
};

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance.
 *
 * Always return the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
        for_each_m4u(data) {
                if (pm_runtime_get_if_in_use(data->dev) <= 0)
                        continue;

                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + data->plat_data->inv_sel_reg);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the TLB flush-all has completed */

                pm_runtime_put(data->dev);
        }
}

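/*
 * Range invalidation is a three-step handshake with the HW: select both
 * MMUs, program the inclusive [start, end] range and trigger
 * F_MMU_INV_RANGE, then poll REG_MMU_CPE_DONE until the HW reports the
 * flush has completed.
 */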
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
                                           size_t granule,
                                           struct mtk_iommu_data *data)
{
        bool has_pm = !!data->dev->pm_domain;
        unsigned long flags;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                if (has_pm) {
                        if (pm_runtime_get_if_in_use(data->dev) <= 0)
                                continue;
                }

                spin_lock_irqsave(&data->tlb_lock, flags);
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + data->plat_data->inv_sel_reg);

                writel_relaxed(MTK_IOMMU_TLB_ADDR(iova),
                               data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);

                /* tlb sync */
                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 1000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(data);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                spin_unlock_irqrestore(&data->tlb_lock, flags);

                if (has_pm)
                        pm_runtime_put(data->dev);
        }
}

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        unsigned int fault_larb, fault_port, sub_comm = 0;
        u32 int_state, regval, va34_32, pa34_32;
        u64 fault_iova, fault_pa;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        if (int_state & F_REG_MMU0_FAULT_MASK) {
                regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
        } else {
                regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
        }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) {
                va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova);
                pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova);
                fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK;
                fault_iova |= (u64)va34_32 << 32;
                fault_pa |= (u64)pa34_32 << 32;
        }

        fault_port = F_MMU_INT_ID_PORT_ID(regval);
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
                fault_larb = F_MMU_INT_ID_COMM_ID(regval);
                sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
        } else {
                fault_larb = F_MMU_INT_ID_LARB_ID(regval);
        }
        fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

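/*
 * A hypothetical example of the matching below: a device whose dma-ranges
 * start at 0x240000000 with size 0x4000000 matches the CCU0 region of
 * mt8192_multi_dom exactly (best fit), while a narrower range that merely
 * falls inside a region only makes that region a candidate.
 */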
static int mtk_iommu_get_domain_id(struct device *dev,
                                   const struct mtk_iommu_plat_data *plat_data)
{
        const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;
        const struct bus_dma_region *dma_rgn = dev->dma_range_map;
        int i, candidate = -1;
        dma_addr_t dma_end;

        if (!dma_rgn || plat_data->iova_region_nr == 1)
                return 0;

        dma_end = dma_rgn->dma_start + dma_rgn->size - 1;
        for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {
                /* Best fit. */
                if (dma_rgn->dma_start == rgn->iova_base &&
                    dma_end == rgn->iova_base + rgn->size - 1)
                        return i;
                /* ok if it is inside this region. */
                if (dma_rgn->dma_start >= rgn->iova_base &&
                    dma_end < rgn->iova_base + rgn->size)
                        candidate = i;
        }

        if (candidate >= 0)
                return candidate;
        dev_err(dev, "Can NOT find the iommu domain id(%pad 0x%llx).\n",
                &dma_rgn->dma_start, dma_rgn->size);
        return -EINVAL;
}

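/*
 * The per-port "bank" programmed below is just the upper 32 bits of the
 * domain's IOVA base: e.g. a domain starting at SZ_4G * 2 (8G) yields
 * bank 2. (The SMI larb side is assumed to consume this as the high
 * IOVA bits.)
 */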
static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
                             bool enable, unsigned int domid)
{
        struct mtk_smi_larb_iommu *larb_mmu;
        unsigned int larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        const struct mtk_iommu_iova_region *region;
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);

                larb_mmu = &data->larb_imu[larbid];

                region = data->plat_data->iova_region + domid;
                larb_mmu->bank[portid] = upper_32_bits(region->iova_base);

                dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n",
                        enable ? "enable" : "disable", dev_name(larb_mmu->dev),
                        portid, domid, larb_mmu->bank[portid]);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

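/*
 * All domains share one v7s pgtable: the first finalise allocates it, and
 * later domains only copy the cfg/ops and adjust their geometry to their
 * own IOVA region.
 */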
static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
                                     struct mtk_iommu_data *data,
                                     unsigned int domid)
{
        const struct mtk_iommu_iova_region *region;

        /* Use the existing domain as there is only one pgtable here. */
        if (data->m4u_dom) {
                dom->iop = data->m4u_dom->iop;
                dom->cfg = data->m4u_dom->cfg;
                dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap;
                goto update_iova_region;
        }

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_ARM_MTK_EXT,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
                .iommu_dev = data->dev,
        };

        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
                dom->cfg.oas = data->enable_4GB ? 33 : 32;
        else
                dom->cfg.oas = 35;

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

update_iova_region:
        /* Update the iova region for this domain */
        region = data->plat_data->iova_region + domid;
        dom->domain.geometry.aperture_start = region->iova_base;
        dom->domain.geometry.aperture_end = region->iova_base + region->size - 1;
        dom->domain.geometry.force_aperture = true;
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain)) {
                kfree(dom);
                return NULL;
        }

        return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct device *m4udev = data->dev;
        int ret, domid;

        domid = mtk_iommu_get_domain_id(dev, data->plat_data);
        if (domid < 0)
                return domid;

        if (!dom->data) {
                if (mtk_iommu_domain_finalise(dom, data, domid))
                        return -ENODEV;
                dom->data = data;
        }

        if (!data->m4u_dom) { /* Initialize the M4U HW */
                ret = pm_runtime_resume_and_get(m4udev);
                if (ret < 0)
                        return ret;

                ret = mtk_iommu_hw_init(data);
                if (ret) {
                        pm_runtime_put(m4udev);
                        return ret;
                }
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);

                pm_runtime_put(m4udev);
        }

        mtk_iommu_config(data, dev, true, domid);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

        mtk_iommu_config(data, dev, false, 0);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        /* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
        if (dom->data->enable_4GB)
                paddr |= BIT_ULL(32);

        /* Synchronize with the tlb_lock */
        return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

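/*
 * unmap only widens the gather window; the actual HW invalidation of the
 * accumulated [start, end] range is deferred to mtk_iommu_iotlb_sync().
 */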
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size,
                              struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long end = iova + size - 1;

        if (gather->start > iova)
                gather->start = iova;
        if (gather->end < end)
                gather->end = end;
        return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        mtk_iommu_tlb_flush_all(dom->data);
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        size_t length = gather->end - gather->start + 1;

        mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
                                       dom->data);
}

static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        phys_addr_t pa;

        pa = dom->iop->iova_to_phys(dom->iop, iova);
        if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);

        return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return ERR_PTR(-ENODEV); /* Not an IOMMU client device */

        data = dev_iommu_priv_get(dev);

        return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;

        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        struct iommu_group *group;
        int domid;

        if (!data)
                return ERR_PTR(-ENODEV);

        domid = mtk_iommu_get_domain_id(dev, data->plat_data);
        if (domid < 0)
                return ERR_PTR(domid);

        group = data->m4u_group[domid];
        if (!group) {
                group = iommu_group_alloc();
                if (!IS_ERR(group))
                        data->m4u_group[domid] = group;
        } else {
                iommu_group_ref_get(group);
        }
        return group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!dev_iommu_priv_get(dev)) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}

static void mtk_iommu_get_resv_regions(struct device *dev,
                                       struct list_head *head)
{
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
        unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i;
        const struct mtk_iommu_iova_region *resv, *curdom;
        struct iommu_resv_region *region;
        int prot = IOMMU_WRITE | IOMMU_READ;

        if ((int)domid < 0)
                return;
        curdom = data->plat_data->iova_region + domid;
        for (i = 0; i < data->plat_data->iova_region_nr; i++) {
                resv = data->plat_data->iova_region + i;

                /* Only reserve when the region is inside the current domain */
                if (resv->iova_base <= curdom->iova_base ||
                    resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
                        continue;

                region = iommu_alloc_resv_region(resv->iova_base, resv->size,
                                                 prot, IOMMU_RESV_RESERVED);
                if (!region)
                        return;

                list_add_tail(&region->list, head);
        }
}

static const struct iommu_ops mtk_iommu_ops = {
        .domain_alloc = mtk_iommu_domain_alloc,
        .domain_free = mtk_iommu_domain_free,
        .attach_dev = mtk_iommu_attach_device,
        .detach_dev = mtk_iommu_detach_device,
        .map = mtk_iommu_map,
        .unmap = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
        .iotlb_sync = mtk_iommu_iotlb_sync,
        .iotlb_sync_map = mtk_iommu_sync_map,
        .iova_to_phys = mtk_iommu_iova_to_phys,
        .probe_device = mtk_iommu_probe_device,
        .release_device = mtk_iommu_release_device,
        .device_group = mtk_iommu_device_group,
        .of_xlate = mtk_iommu_of_xlate,
        .get_resv_regions = mtk_iommu_get_resv_regions,
        .put_resv_regions = generic_iommu_put_resv_regions,
        .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;

        if (data->plat_data->m4u_plat == M4U_MT8173) {
                regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                         F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
        } else {
                regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
                regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
        }
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULTI_HIT_EN |
                 F_TABLE_WALK_FAULT_INT_EN |
                 F_PREFETCH_FIFO_OVERFLOW_INT_EN |
                 F_MISS_FIFO_OVERFLOW_INT_EN |
                 F_PREFETCH_FIFO_ERR_INT_EN |
                 F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                 F_INT_MAIN_MULTI_HIT_FAULT |
                 F_INT_INVALID_PA_FAULT |
                 F_INT_ENTRY_REPLACEMENT_FAULT |
                 F_INT_TLB_MISS_FAULT |
                 F_INT_MISS_TRANSACTION_FIFO_FAULT |
                 F_INT_PREFETCH_TRANSACTION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

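        /*
         * Worked example for the legacy IVRP layout below: a hypothetical
         * protect_base of 0x4f800100 with 4GB mode enabled is written as
         * (0x4f800100 >> 1) | BIT(31) = 0xa7c00080.
         */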
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB &&
            MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bits[32:30]
                 * of the end/start addresses (EA = 7, SA = 4).
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
                /* write command throttling mode */
                regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
                regval &= ~F_MMU_WR_THROT_DIS_MASK;
                writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
        }

        if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
                /* The register is called STANDARD_AXI_MODE in this case */
                regval = 0;
        } else {
                regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
                regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
                if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
                        regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
        }
        writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static const struct component_master_ops mtk_iommu_com_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) .bind = mtk_iommu_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) .unbind = mtk_iommu_unbind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static int mtk_iommu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct mtk_iommu_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct device_node *larbnode, *smicomm_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct platform_device *plarbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct device_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) resource_size_t ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) struct component_match *match = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct regmap *infracfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void *protect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int i, larb_nr, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) data->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) data->plat_data = of_device_get_match_data(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
	/*
	 * Protection memory: the HW redirects faulting accesses here on a
	 * translation fault. Twice the alignment is allocated so that an
	 * aligned MTK_PROTECT_PA_ALIGN region always fits inside.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!protect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
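	/*
	 * On SoCs with 4GB mode, query the infracfg syscon to learn whether
	 * the 4GB DRAM layout is actually enabled; enable_4GB then governs
	 * how physical addresses beyond the 32-bit boundary are treated
	 * elsewhere in the driver.
	 */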
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) switch (data->plat_data->m4u_plat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) case M4U_MT2712:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) p = "mediatek,mt2712-infracfg";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) case M4U_MT8173:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) p = "mediatek,mt8173-infracfg";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) infracfg = syscon_regmap_lookup_by_compatible(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (IS_ERR(infracfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return PTR_ERR(infracfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ret = regmap_read(infracfg, REG_INFRA_MISC, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) data->base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (IS_ERR(data->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) return PTR_ERR(data->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ioaddr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) data->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (data->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return data->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) data->bclk = devm_clk_get(dev, "bclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (IS_ERR(data->bclk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return PTR_ERR(data->bclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) larb_nr = of_count_phandle_with_args(dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) "mediatek,larbs", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (larb_nr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return larb_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
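	/*
	 * Walk every larb phandle: record the larb device for the SMI
	 * interface and add it to the component match list, so the master
	 * only binds once all available larbs have probed.
	 */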
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) for (i = 0; i < larb_nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!larbnode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!of_device_is_available(larbnode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) of_node_put(larbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret)	/* The larb IDs are consecutive if this property is absent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) plarbdev = of_find_device_by_node(larbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!plarbdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) of_node_put(larbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) data->larb_imu[id].dev = &plarbdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) component_match_add_release(dev, &match, release_of,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) compare_of, larbnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
	/* Get the smi-common device from the last larb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!smicomm_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
	plarbdev = of_find_device_by_node(smicomm_node);
	of_node_put(smicomm_node);
	if (!plarbdev)	/* smi-common has not probed yet; avoid a NULL deref. */
		return -EPROBE_DEFER;
	data->smicomm_dev = &plarbdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
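	/*
	 * Add a runtime-PM link with the smi-common device as consumer, so
	 * this IOMMU is resumed before the multimedia path behind it.
	 */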
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) link = device_link_add(data->smicomm_dev, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (!link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) goto out_runtime_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) platform_set_drvdata(pdev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) "mtk-iommu.%pa", &ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) goto out_link_remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ret = iommu_device_register(&data->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto out_sysfs_remove;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
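	/*
	 * Put this M4U on the global list; the TLB maintenance paths walk
	 * the list so every registered instance gets invalidated.
	 */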
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) spin_lock_init(&data->tlb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) list_add_tail(&data->list, &m4ulist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
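	/*
	 * Only the first instance installs the IOMMU ops for the platform
	 * bus; later instances find them already set.
	 */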
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!iommu_present(&platform_bus_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto out_list_del;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) goto out_bus_set_null;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) out_bus_set_null:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) bus_set_iommu(&platform_bus_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) out_list_del:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) list_del(&data->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) iommu_device_unregister(&data->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) out_sysfs_remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) iommu_device_sysfs_remove(&data->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) out_link_remove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) device_link_remove(data->smicomm_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) out_runtime_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static int mtk_iommu_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct mtk_iommu_data *data = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) iommu_device_sysfs_remove(&data->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) iommu_device_unregister(&data->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (iommu_present(&platform_bus_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) bus_set_iommu(&platform_bus_type, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) clk_disable_unprepare(data->bclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) device_link_remove(data->smicomm_dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) devm_free_irq(&pdev->dev, data->irq, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) component_master_del(&pdev->dev, &mtk_iommu_com_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct mtk_iommu_data *data = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct mtk_iommu_suspend_reg *reg = &data->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) void __iomem *base = data->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
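	/* Save the HW state that would be lost while the power domain is off. */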
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) clk_disable_unprepare(data->bclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct mtk_iommu_data *data = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct mtk_iommu_suspend_reg *reg = &data->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) void __iomem *base = data->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ret = clk_prepare_enable(data->bclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
	/*
	 * Upon first resume, only enable the clock and return, since the
	 * register values have not been set yet.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (!m4u_dom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
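	/* Restore the saved state and re-install the pagetable base. */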
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
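	/* The non-relaxed writel() orders the PT base write after the restores above. */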
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static const struct dev_pm_ops mtk_iommu_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) SET_RUNTIME_PM_OPS(mtk_iommu_runtime_suspend, mtk_iommu_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
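/*
 * Per-SoC data. larbid_remap translates the larb index reported in the HW
 * fault ID into the probe-time larb ID; rows with several entries are for
 * SoCs with sub-commons (HAS_SUB_COMM).
 */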
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static const struct mtk_iommu_plat_data mt2712_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .m4u_plat = M4U_MT2712,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) .iova_region = single_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) .iova_region_nr = ARRAY_SIZE(single_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static const struct mtk_iommu_plat_data mt6779_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) .m4u_plat = M4U_MT6779,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) .iova_region = single_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) .iova_region_nr = ARRAY_SIZE(single_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static const struct mtk_iommu_plat_data mt8167_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) .m4u_plat = M4U_MT8167,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .iova_region = single_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .iova_region_nr = ARRAY_SIZE(single_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static const struct mtk_iommu_plat_data mt8173_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) .m4u_plat = M4U_MT8173,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) HAS_LEGACY_IVRP_PADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .iova_region = single_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .iova_region_nr = ARRAY_SIZE(single_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static const struct mtk_iommu_plat_data mt8183_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .m4u_plat = M4U_MT8183,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) .flags = RESET_AXI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) .iova_region = single_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) .iova_region_nr = ARRAY_SIZE(single_domain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static const struct mtk_iommu_plat_data mt8192_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) .m4u_plat = M4U_MT8192,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) .flags = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) WR_THROT_EN | IOVA_34_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) .iova_region = mt8192_multi_dom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {0, 14, 16}, {0, 13, 18, 17}},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static const struct of_device_id mtk_iommu_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) };
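/*
 * Illustrative consumer binding (a sketch, not taken from this file): a
 * master behind one of the M4Us above references it with the standard
 * "iommus" property, using a port index from the SoC's dt-bindings
 * memory-port header, e.g.:
 *
 *	display@14007000 {
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 *
 * The node name and port macro here are examples only.
 */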
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static struct platform_driver mtk_iommu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) .probe = mtk_iommu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) .remove = mtk_iommu_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) .name = "mtk-iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) .of_match_table = mtk_iommu_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) .pm = &mtk_iommu_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static int __init mtk_iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ret = platform_driver_register(&mtk_iommu_driver);
	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pr_err("Failed to register MTK IOMMU driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
subsys_initcall(mtk_iommu_init);