// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Rockchip
 *
 * Module Authors:	Simon Xue <xxm@rock-chips.com>
 *			Daniel Kurtz <djkurtz@chromium.org>
 */

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/rockchip/rockchip_iommu.h>

/* MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0	/* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1	/* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2	/* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3	/* Stop stall re-enables paging */
#define RK_MMU_CMD_ZAP_CACHE        4	/* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5	/* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6	/* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01	/* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02	/* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define DISABLE_FETCH_DTE_TIME_LIMIT BIT(31)

#define CMD_RETRY_COUNT 10

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
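
/*
 * A minimal sketch of how the bitmap above decodes (illustration only,
 * not used by the driver): each set bit N advertises a supported mapping
 * size of 2^N bytes, so 0x007ff000 sets bits 12..22, i.e.
 *
 *	4 KiB, 8 KiB, 16 KiB, ..., 2 MiB, 4 MiB
 *
 * which is exactly the set of power-of-two sizes that fit in one page table.
 */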

#define DT_LO_MASK 0xfffff000
#define DT_HI_MASK GENMASK_ULL(39, 32)
#define DT_SHIFT   28

#define DTE_BASE_HI_MASK GENMASK(11, 4)

#define PAGE_DESC_LO_MASK   0xfffff000
#define PAGE_DESC_HI1_LOWER 32
#define PAGE_DESC_HI1_UPPER 35
#define PAGE_DESC_HI2_LOWER 36
#define PAGE_DESC_HI2_UPPER 39
#define PAGE_DESC_HI_MASK1  GENMASK_ULL(PAGE_DESC_HI1_UPPER, PAGE_DESC_HI1_LOWER)
#define PAGE_DESC_HI_MASK2  GENMASK_ULL(PAGE_DESC_HI2_UPPER, PAGE_DESC_HI2_LOWER)

#define DTE_HI1_LOWER 8
#define DTE_HI1_UPPER 11
#define DTE_HI2_LOWER 4
#define DTE_HI2_UPPER 7
#define DTE_HI_MASK1  GENMASK(DTE_HI1_UPPER, DTE_HI1_LOWER)
#define DTE_HI_MASK2  GENMASK(DTE_HI2_UPPER, DTE_HI2_LOWER)

#define PAGE_DESC_HI_SHIFT1 (PAGE_DESC_HI1_LOWER - DTE_HI1_LOWER)
#define PAGE_DESC_HI_SHIFT2 (PAGE_DESC_HI2_LOWER - DTE_HI2_LOWER)

struct rk_iommu_domain {
	struct list_head iommus;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */
	bool shootdown_entire;

	struct iommu_domain domain;
};

struct rockchip_iommu_data {
	u32 version;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	int num_irq;
	struct clk_bulk_data *clocks;
	int num_clocks;
	bool reset_disabled;
	bool skip_read; /* rk3126/rk3128 can't read vop iommu registers */
	bool dlr_disable; /* avoid accessing the iommu when runtime PM ops are called */
	bool cmd_retry;
	bool master_handle_irq;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
	struct iommu_group *group;
	u32 version;
	bool shootdown_entire;
	bool iommu_enabled;
	bool need_res_map;
};

struct rk_iommudata {
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
	bool defer_attach;
};

static struct device *dma_dev;
static struct rk_iommu *rk_iommu_from_dev(struct device *dev);
static char reserve_range[PAGE_SIZE] __aligned(PAGE_SIZE);
static phys_addr_t res_page;

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count is the number of u32 entries */

	dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level is the 1024 Page Tables (PT).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
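
/*
 * Walk sketch, for illustration only, using the helpers defined below (or
 * their _v2 variants) and an arbitrary, hypothetical IOVA:
 *
 *	dte  = dt[rk_iova_dte_index(iova)];
 *	pt   = phys_to_virt(rk_dte_pt_address(dte));
 *	pte  = pt[rk_iova_pte_index(iova)];
 *	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
 *
 * The hardware performs this walk on every transaction; log_iova() and
 * rk_iommu_iova_to_phys() below do essentially the same walk in software.
 */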

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

/*
 * In v2:
 *  31:12 - PT address bits 31:12
 *  11: 8 - PT address bits 35:32
 *   7: 4 - PT address bits 39:36
 *   3: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK_V2 0xfffffff0

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline phys_addr_t rk_dte_pt_address_v2(u32 dte)
{
	u64 dte_v2 = dte;

	dte_v2 = ((dte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
		 ((dte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
		 (dte_v2 & PAGE_DESC_LO_MASK);

	return (phys_addr_t)dte_v2;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte_v2(dma_addr_t pt_dma)
{
	pt_dma = (pt_dma & PAGE_DESC_LO_MASK) |
		 ((pt_dma & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
		 ((pt_dma & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2);

	return (pt_dma & RK_DTE_PT_ADDRESS_MASK_V2) | RK_DTE_PT_VALID;
}
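
/*
 * Worked example for the v2 packing above (arbitrary 40-bit address,
 * illustration only):
 *
 *	pt_dma                    = 0x1_2345_6000
 *	rk_mk_dte_v2(pt_dma)      = 0x23456000 | (0x1 << 8) | RK_DTE_PT_VALID
 *	                          = 0x23456101
 *	rk_dte_pt_address_v2(dte) = (0x100 << 24) | 0x23456000
 *	                          = 0x1_2345_6000
 *
 * i.e. PA[35:32] travel in DTE bits 11:8 and PA[39:36] in DTE bits 7:4,
 * so the round trip recovers the original page-table address.
 */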

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

/*
 * In v2:
 *  31:12 - Page address bits 31:12
 *  11: 9 - Page address bits 34:32
 *   8: 4 - Page address bits 39:35
 *      3 - Security
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK_V2  0xfffffff0
#define RK_PTE_PAGE_FLAGS_MASK_V2    0x0000000e
#define RK_PTE_PAGE_READABLE_V2      BIT(1)
#define RK_PTE_PAGE_WRITABLE_V2      BIT(2)

#define RK_PTE_PAGE_REPRESENT        BIT(3)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline phys_addr_t rk_pte_page_address_v2(u32 pte)
{
	u64 pte_v2 = pte;

	pte_v2 = ((pte_v2 & DTE_HI_MASK2) << PAGE_DESC_HI_SHIFT2) |
		 ((pte_v2 & DTE_HI_MASK1) << PAGE_DESC_HI_SHIFT1) |
		 (pte_v2 & PAGE_DESC_LO_MASK);

	return (phys_addr_t)pte_v2;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

static inline bool rk_pte_is_page_represent(u32 pte)
{
	return pte & RK_PTE_PAGE_REPRESENT;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;

	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}
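
/*
 * Example (hypothetical values): mapping a 4 KiB page at 0x23456000 with
 * IOMMU_READ | IOMMU_WRITE yields
 *
 *	rk_mk_pte(0x23456000, IOMMU_READ | IOMMU_WRITE)
 *		= 0x23456000 | RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE
 *		  | RK_PTE_PAGE_VALID
 *		= 0x23456007
 */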

static u32 rk_mk_pte_v2(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE_V2 : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE_V2 : 0;
	/*
	 * If BIT(3) is set, iommu_map() does not fail when BIT(0) is already
	 * set, i.e. a page that is already present can be updated in place.
	 * This can be used to re-map a pre-mapped 4G range.
	 */
	flags |= (prot & IOMMU_PRIV) ? RK_PTE_PAGE_REPRESENT : 0;

	page = (page & PAGE_DESC_LO_MASK) |
	       ((page & PAGE_DESC_HI_MASK1) >> PAGE_DESC_HI_SHIFT1) |
	       ((page & PAGE_DESC_HI_MASK2) >> PAGE_DESC_HI_SHIFT2);
	page &= RK_PTE_PAGE_ADDRESS_MASK_V2;

	return page | flags | RK_PTE_PAGE_VALID;
}
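
/*
 * Worked example of what the v2 PTE helpers compute (arbitrary 40-bit
 * address, illustration only):
 *
 *	page = 0x1_2345_6000
 *	rk_mk_pte_v2(page, IOMMU_READ | IOMMU_WRITE)
 *		= 0x23456100 | RK_PTE_PAGE_READABLE_V2
 *		  | RK_PTE_PAGE_WRITABLE_V2 | RK_PTE_PAGE_VALID
 *		= 0x23456107
 *
 * and rk_pte_page_address_v2(0x23456107) recovers 0x1_2345_6000.
 */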

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~(RK_PTE_PAGE_VALID | RK_PTE_PAGE_REPRESENT);
}

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
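
/*
 * Usage sketch for the helpers above (hypothetical IOVA, illustration only):
 *
 *	iova = 0x12345678
 *	rk_iova_dte_index(iova)   = (0x12345678 & 0xffc00000) >> 22 = 0x048
 *	rk_iova_pte_index(iova)   = (0x12345678 & 0x003ff000) >> 12 = 0x345
 *	rk_iova_page_offset(iova) =  0x12345678 & 0x00000fff        = 0x678
 */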

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
					   RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;
	int retry_count = 0;

	if (iommu->skip_read)
		goto read_wa;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

read_wa:
	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
	if (iommu->skip_read)
		return 0;

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, retry_count = %d, status: %#08x\n",
				retry_count,
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
			goto read_wa;
	}

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;
	int retry_count = 0;

	if (iommu->skip_read)
		goto read_wa;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

read_wa:
	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
	if (iommu->skip_read)
		return 0;

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, retry_count = %d, status: %#08x\n",
				retry_count,
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
			goto read_wa;
	}

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;
	int retry_count = 0;

	if (iommu->skip_read)
		goto read_wa;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

read_wa:
	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
	if (iommu->skip_read)
		return 0;

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, retry_count = %d, status: %#08x\n",
				retry_count,
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
			goto read_wa;
	}

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;
	int retry_count = 0;

	if (iommu->skip_read)
		goto read_wa;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

read_wa:
	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
	if (iommu->skip_read)
		return 0;

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, retry_count = %d, status: %#08x\n",
				retry_count,
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
		if (iommu->cmd_retry && (retry_count++ < CMD_RETRY_COUNT))
			goto read_wa;
	}

	return ret;
}

static u32 rk_iommu_read_dte_addr(void __iomem *base)
{
	return rk_iommu_read(base, RK_MMU_DTE_ADDR);
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;
	u32 address_mask;

	if (iommu->reset_disabled)
		return 0;

	if (iommu->skip_read)
		goto read_wa;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles are read back (on v2, the
	 * upper 7 nybbles are read back).
	 */
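	/*
	 * For example (values from the defines above, shown for clarity):
	 *	DTE_ADDR_DUMMY                             = 0xCAFEBABE
	 *	DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK    = 0xCAFEB000
	 *	DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK_V2 = 0xCAFEBAB0
	 */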
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		if (iommu->version >= 0x2)
			address_mask = RK_DTE_PT_ADDRESS_MASK_V2;
		else
			address_mask = RK_DTE_PT_ADDRESS_MASK;
		ret = readx_poll_timeout(rk_iommu_read_dte_addr, iommu->bases[i], dte_addr,
					 dte_addr == (DTE_ADDR_DUMMY & address_mask),
					 RK_MMU_POLL_PERIOD_US, RK_MMU_POLL_TIMEOUT_US);
		if (ret) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

read_wa:
	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
	if (iommu->skip_read)
		return 0;

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_POLL_TIMEOUT_US,
				 RK_MMU_FORCE_RESET_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
	if (iommu->version >= 0x2) {
		mmu_dte_addr_phys = (mmu_dte_addr_phys & DT_LO_MASK) |
				    ((mmu_dte_addr_phys & DTE_BASE_HI_MASK) << DT_SHIFT);
	}

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	if (iommu->version >= 0x2)
		pte_addr_phys = rk_dte_pt_address_v2(dte) + (pte_index * 4);
	else
		pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	if (iommu->version >= 0x2)
		page_addr_phys = rk_pte_page_address_v2(pte) + page_offset;
	else
		page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static int rk_pagefault_done(struct rk_iommu *iommu)
{
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	int i;
	u32 int_mask;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			if (!iommu->master_handle_irq) {
				/*
				 * Report page fault to any installed handlers.
				 * Ignore the return code, though, since we always zap cache
				 * and clear the page fault anyway.
				 */
				if (iommu->domain)
					report_iommu_fault(iommu->domain, iommu->dev, iova,
							   status);
				else
					dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
			}

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);

			/*
			 * The master may clear the int_mask to prevent the
			 * iommu from re-entering the interrupt while it is
			 * mapping. In that case, postpone the PAGE_FAULT_DONE
			 * command until the mapping has finished.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int_mask = rk_iommu_read(iommu->bases[i], RK_MMU_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (int_mask != 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (int_status & RK_MMU_IRQ_BUS_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (int_status & ~RK_MMU_IRQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dev_err(iommu->dev, "unexpected int_status: %#08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int rockchip_pagefault_done(struct device *master_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return rk_pagefault_done(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) EXPORT_SYMBOL_GPL(rockchip_pagefault_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
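/**
 * rockchip_get_iommu_base - get the MMIO base of one MMU instance
 * @master_dev: master device attached to the IOMMU
 * @idx: index of the MMU instance
 *
 * No bounds checking is done on @idx.
 */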
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) void __iomem *rockchip_get_iommu_base(struct device *master_dev, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct rk_iommu *iommu = rk_iommu_from_dev(master_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return iommu->bases[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) EXPORT_SYMBOL_GPL(rockchip_get_iommu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
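/*
 * IRQ handler. Work is only done if the IOMMU is currently powered on.
 * If the master handles faults itself (master_handle_irq), the fault is
 * merely reported to the domain's fault handler; otherwise it is fully
 * serviced here via rk_pagefault_done().
 */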
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct rk_iommu *iommu = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) err = pm_runtime_get_if_in_use(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (WARN_ON_ONCE(err <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* The master must call rockchip_pagefault_done() to handle the fault */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (iommu->master_handle_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = report_iommu_fault(iommu->domain, iommu->dev, -1, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ret = rk_pagefault_done(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) clk_bulk_disable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pm_runtime_put(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
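/*
 * Translate an IOVA to a physical address by walking the two-level page
 * table in software under dt_lock. Returns 0 if the IOVA is not mapped.
 */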
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) phys_addr_t pt_phys, phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u32 dte, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u32 *page_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!rk_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) pt_phys = rk_dte_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) page_table = (u32 *)phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pte = page_table[rk_iova_pte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (!rk_pte_is_page_valid(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
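/* Same software walk as above, but with the v2 DTE/PTE address layout. */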
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static phys_addr_t rk_iommu_iova_to_phys_v2(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) phys_addr_t pt_phys, phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u32 dte, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 *page_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!rk_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) pt_phys = rk_dte_pt_address_v2(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) page_table = (u32 *)phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) pte = page_table[rk_iova_pte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (!rk_pte_is_page_valid(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) phys = rk_pte_page_address_v2(pte) + rk_iova_page_offset(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
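/*
 * Shoot down IOTLB entries for an IOVA range on every IOMMU attached to
 * the domain. Runtime-suspended IOMMUs are skipped.
 */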
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) dma_addr_t iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Shoot down this iova range in all iommus using this domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) spin_lock_irqsave(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) list_for_each(pos, &rk_domain->iommus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) iommu = list_entry(pos, struct rk_iommu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* Only zap TLBs of IOMMUs that are powered on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) ret = pm_runtime_get_if_in_use(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (WARN_ON_ONCE(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) WARN_ON(clk_bulk_enable(iommu->num_clocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) iommu->clocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) rk_iommu_zap_lines(iommu, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) clk_bulk_disable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) pm_runtime_put(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
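/*
 * Zap only the first and the last page of the range: those are the only
 * pages whose dte/pte cachelines can be shared with an existing mapping.
 */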
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) dma_addr_t iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (size > SPAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) SPAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
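/*
 * Return the page table covering @iova, allocating and DMA-mapping a new
 * one if the directory entry is not yet valid. Called with dt_lock held,
 * hence the GFP_ATOMIC allocation.
 */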
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u32 *page_table, *dte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) u32 dte_index, dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dma_addr_t pt_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) assert_spin_locked(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) dte_index = rk_iova_dte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) dte_addr = &rk_domain->dt[dte_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dte = *dte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (rk_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (!page_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (dma_mapping_error(dma_dev, pt_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dev_err(dma_dev, "DMA mapping error while allocating page table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) free_page((unsigned long)page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dte = rk_mk_dte(pt_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) *dte_addr = dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) rk_table_flush(rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rk_domain->dt_dma + dte_index * sizeof(u32), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) pt_phys = rk_dte_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return (u32 *)phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
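/* Same as above, but builds the DTE with the v2 address encoding. */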
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static u32 *rk_dte_get_page_table_v2(struct rk_iommu_domain *rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u32 *page_table, *dte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) u32 dte_index, dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dma_addr_t pt_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) assert_spin_locked(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dte_index = rk_iova_dte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dte_addr = &rk_domain->dt[dte_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) dte = *dte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (rk_dte_is_pt_valid(dte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (!page_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (dma_mapping_error(dma_dev, pt_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev_err(dma_dev, "DMA mapping error while allocating page table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) free_page((unsigned long)page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dte = rk_mk_dte_v2(pt_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) *dte_addr = dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) rk_table_flush(rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) rk_domain->dt_dma + dte_index * sizeof(u32), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) pt_phys = rk_dte_pt_address_v2(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return (u32 *)phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
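/*
 * Invalidate up to size / SPAGE_SIZE ptes, stopping early at the first pte
 * that is already invalid. If @iommu needs the reserved mapping
 * (need_res_map), the ptes are pointed at the reserved page instead of
 * being cleared. Returns the number of bytes actually unmapped.
 */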
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u32 *pte_addr, dma_addr_t pte_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) size_t size, struct rk_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) unsigned int pte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned int pte_total = size / SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) assert_spin_locked(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (pte_count = 0; pte_count < pte_total; pte_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) u32 pte = pte_addr[pte_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!rk_pte_is_page_valid(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (iommu && iommu->need_res_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (iommu->version >= 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) pte_addr[pte_count] = rk_mk_pte_v2(res_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) pte_addr[pte_count] = rk_mk_pte(res_page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) pte_addr[pte_count] = rk_mk_pte_invalid(pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) rk_table_flush(rk_domain, pte_dma, pte_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return pte_count * SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
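/*
 * Pick an IOMMU from the domain's list, stopping at the first one that
 * needs the reserved-page mapping. Callers re-check need_res_map before
 * relying on it, so it does not matter if none matches.
 */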
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static struct rk_iommu *rk_iommu_get(struct rk_iommu_domain *rk_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct rk_iommu *iommu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) spin_lock_irqsave(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) list_for_each(pos, &rk_domain->iommus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) iommu = list_entry(pos, struct rk_iommu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (iommu->need_res_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
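/*
 * Fill the ptes for a new mapping. IOMMU_PRIV requests map the reserved
 * page instead of @paddr. Fails with -EADDRINUSE (after unwinding the
 * partial mapping) if any pte in the range is already valid and is not
 * just the reserved-page placeholder.
 */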
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dma_addr_t pte_dma, dma_addr_t iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) phys_addr_t paddr, size_t size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) unsigned int pte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) unsigned int pte_total = size / SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) phys_addr_t page_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) assert_spin_locked(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) for (pte_count = 0; pte_count < pte_total; pte_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) u32 pte = pte_addr[pte_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (prot & IOMMU_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) pte_addr[pte_count] = rk_mk_pte(res_page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) pte_addr[pte_count] = rk_mk_pte(paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) paddr += SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) rk_table_flush(rk_domain, pte_dma, pte_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * Zap the first and last iova to evict from iotlb any previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * mapped cachelines holding stale values for its dte and pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * We only zap the first and last iova, since only they could have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * dte or pte shared with an existing mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* Do not zap tlb cache lines if shootdown_entire is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!rk_domain->shootdown_entire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) rk_iommu_zap_iova_first_last(rk_domain, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Unmap the range of iovas that we just mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) pte_count * SPAGE_SIZE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) iova += pte_count * SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) page_phys = rk_pte_page_address(pte_addr[pte_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) &iova, &page_phys, &paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
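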
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static int rk_iommu_map_iova_v2(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) dma_addr_t pte_dma, dma_addr_t iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) phys_addr_t paddr, size_t size, int prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) unsigned int pte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) unsigned int pte_total = size / SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) phys_addr_t page_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) assert_spin_locked(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) for (pte_count = 0; pte_count < pte_total; pte_count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) u32 pte = pte_addr[pte_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (rk_pte_is_page_valid(pte) && !rk_pte_is_page_represent(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (prot & IOMMU_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) pte_addr[pte_count] = rk_mk_pte_v2(res_page, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pte_addr[pte_count] = rk_mk_pte_v2(paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) paddr += SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) rk_table_flush(rk_domain, pte_dma, pte_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * Zap the first and last iova to evict from iotlb any previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * mapped cachelines holding stale values for its dte and pte.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * We only zap the first and last iova, since only they could have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * dte or pte shared with an existing mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Do not zap tlb cache lines if shootdown_entire is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (!rk_domain->shootdown_entire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) rk_iommu_zap_iova_first_last(rk_domain, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* Unmap the range of iovas that we just mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) pte_count * SPAGE_SIZE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) iova += pte_count * SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) page_phys = rk_pte_page_address_v2(pte_addr[pte_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) &iova, &page_phys, &paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
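/*
 * iommu_ops .map callback: look up (or allocate) the page table covering
 * @_iova and fill the ptes under dt_lock.
 */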
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) u32 *page_table, *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) u32 dte, pte_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * pgsize_bitmap specifies iova sizes that fit in one page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * (1024 4-KiB pages = 4 MiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * So, size will always be 4096 <= size <= 4194304.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * Since iommu_map() guarantees that both iova and size will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * aligned, we will always only be mapping from a single dte here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) page_table = rk_dte_get_page_table(rk_domain, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (IS_ERR(page_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return PTR_ERR(page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pte_index = rk_iova_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) pte_addr = &page_table[pte_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) pte_dma = rk_dte_pt_address(dte) + pte_index * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) paddr, size, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int rk_iommu_map_v2(struct iommu_domain *domain, unsigned long _iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) u32 *page_table, *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u32 dte, pte_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * pgsize_bitmap specifies iova sizes that fit in one page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * (1024 4-KiB pages = 4 MiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * So, size will always be 4096 <= size <= 4194304.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * Since iommu_map() guarantees that both iova and size will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * aligned, we will always only be mapping from a single dte here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) page_table = rk_dte_get_page_table_v2(rk_domain, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (IS_ERR(page_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return PTR_ERR(page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) pte_index = rk_iova_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) pte_addr = &page_table[pte_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) pte_dma = rk_dte_pt_address_v2(dte) + pte_index * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ret = rk_iommu_map_iova_v2(rk_domain, pte_addr, pte_dma, iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) paddr, size, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
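/*
 * iommu_ops .unmap callback: clear the ptes under dt_lock, then shoot down
 * the corresponding IOTLB entries.
 */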
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) u32 dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) u32 *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) size_t unmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct rk_iommu *iommu = rk_iommu_get(rk_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * pgsize_bitmap specifies iova sizes that fit in one page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * (1024 4-KiB pages = 4 MiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * So, size will always be 4096 <= size <= 4194304.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * Since iommu_unmap() guarantees that both iova and size will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * aligned, we will always only be unmapping from a single dte here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* Just return 0 if the iova is already unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (!rk_dte_is_pt_valid(dte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pt_phys = rk_dte_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Shoot down iotlb entries for the iova range that was just unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rk_iommu_zap_iova(rk_domain, iova, unmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return unmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static size_t rk_iommu_unmap_v2(struct iommu_domain *domain, unsigned long _iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) phys_addr_t pt_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) u32 dte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) u32 *pte_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) size_t unmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct rk_iommu *iommu = rk_iommu_get(rk_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) spin_lock_irqsave(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * pgsize_bitmap specifies iova sizes that fit in one page table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * (1024 4-KiB pages = 4 MiB).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * So, size will always be 4096 <= size <= 4194304.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * Since iommu_unmap() guarantees that both iova and size will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * aligned, we will always only be unmapping from a single dte here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) dte = rk_domain->dt[rk_iova_dte_index(iova)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Just return 0 if the iova is already unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!rk_dte_is_pt_valid(dte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) pt_phys = rk_dte_pt_address_v2(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Shoot down iotlb entries for the iova range that was just unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* Do not zap tlb cache lines if shootdown_entire is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (!rk_domain->shootdown_entire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) rk_iommu_zap_iova(rk_domain, iova, unmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return unmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
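/*
 * Issue ZAP_CACHE on every MMU instance of every powered-on IOMMU attached
 * to the domain.
 */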
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void rk_iommu_flush_tlb_all(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) spin_lock_irqsave(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) list_for_each(pos, &rk_domain->iommus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) iommu = list_entry(pos, struct rk_iommu, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ret = pm_runtime_get_if_in_use(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (WARN_ON_ONCE(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) for (i = 0; i < iommu->num_mmu; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) RK_MMU_CMD_ZAP_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) clk_bulk_disable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) pm_runtime_put(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) struct rk_iommudata *data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return data ? data->iommu : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /* Must be called with iommu powered on and attached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static void rk_iommu_disable(struct rk_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* Ignore error while disabling, just keep going */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) rk_iommu_enable_stall(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) rk_iommu_disable_paging(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) for (i = 0; i < iommu->num_mmu; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) rk_iommu_disable_stall(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) clk_bulk_disable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) iommu->iommu_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
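/**
 * rockchip_iommu_disable - let a master explicitly disable its IOMMU
 * @dev: master device attached to the IOMMU
 *
 * Stalls the MMUs, disables paging and clears the DTE address and the
 * interrupt mask. The caller must ensure the IOMMU is powered on.
 */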
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int rockchip_iommu_disable(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) rk_iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) EXPORT_SYMBOL(rockchip_iommu_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* Must be called with iommu powered on and attached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int rk_iommu_enable(struct rk_iommu *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct iommu_domain *domain = iommu->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u32 dt_v2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) u32 auto_gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) ret = rk_iommu_enable_stall(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto out_disable_clocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ret = rk_iommu_force_reset(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out_disable_stall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) for (i = 0; i < iommu->num_mmu; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (iommu->version >= 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) dt_v2 = (rk_domain->dt_dma & DT_LO_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) ((rk_domain->dt_dma & DT_HI_MASK) >> DT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dt_v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) rk_domain->dt_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* Workaround for the iommu getting blocked: force BIT(31) to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) auto_gate = rk_iommu_read(iommu->bases[i], RK_MMU_AUTO_GATING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) auto_gate |= DISABLE_FETCH_DTE_TIME_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) rk_iommu_write(iommu->bases[i], RK_MMU_AUTO_GATING, auto_gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = rk_iommu_enable_paging(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) out_disable_stall:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) rk_iommu_disable_stall(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) out_disable_clocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) clk_bulk_disable(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) iommu->iommu_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
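/**
 * rockchip_iommu_enable - let a master explicitly (re-)enable its IOMMU
 * @dev: master device attached to the IOMMU
 *
 * Resets the MMUs, programs the DTE address, zaps the caches, unmasks the
 * interrupts and enables paging. The caller must ensure the IOMMU is
 * powered on and a domain is attached.
 *
 * Minimal usage sketch (hypothetical master driver code, not part of this
 * file): bracket the hardware job with enable/disable.
 *
 *	ret = rockchip_iommu_enable(dev);
 *	if (ret)
 *		return ret;
 *	... program the master, run the job ...
 *	rockchip_iommu_disable(dev);
 */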
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) int rockchip_iommu_enable(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return rk_iommu_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) EXPORT_SYMBOL(rockchip_iommu_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) bool rockchip_iommu_is_enabled(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return iommu->iommu_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) EXPORT_SYMBOL(rockchip_iommu_is_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static void rk_iommu_detach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* Allow 'virtual devices' (e.g. drm) to detach from the domain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dev_dbg(dev, "Detaching from iommu domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) iommu->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) spin_lock_irqsave(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) list_del_init(&iommu->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ret = pm_runtime_get_if_in_use(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) WARN_ON_ONCE(ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) rk_iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) pm_runtime_put(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
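/*
 * Attach a master to a domain. Devices without an IOMMU ("virtual devices"
 * such as drm) are accepted as a no-op. The IOMMU is only programmed here
 * if it is currently powered on.
 */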
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static int rk_iommu_attach_device(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * Allow 'virtual devices' (e.g., drm) to attach to domain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Such a device does not belong to an iommu group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) dev_dbg(dev, "Attaching to iommu domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) rk_iommu_detach_device(iommu->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) iommu->domain = domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /* Attaching a NULL domain disables the iommu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (!domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) spin_lock_irqsave(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) list_add_tail(&iommu->node, &rk_domain->iommus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) rk_domain->shootdown_entire = iommu->shootdown_entire;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) ret = pm_runtime_get_if_in_use(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (!ret || WARN_ON_ONCE(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ret = rk_iommu_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) rk_iommu_detach_device(iommu->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) pm_runtime_put(iommu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
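/*
 * Allocate a domain: one zeroed 4 KiB directory table page, DMA-mapped so
 * the MMU can walk it, plus the locks and the list of attached IOMMUs.
 */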
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct rk_iommu_domain *rk_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (!dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (!rk_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (type == IOMMU_DOMAIN_DMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) iommu_get_dma_cookie(&rk_domain->domain))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) goto err_free_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * rk32xx iommus use a two-level pagetable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * Allocate one 4 KiB page for each table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!rk_domain->dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) goto err_put_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dev_err(dma_dev, "DMA map error for DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) goto err_free_dt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) spin_lock_init(&rk_domain->iommus_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) spin_lock_init(&rk_domain->dt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) INIT_LIST_HEAD(&rk_domain->iommus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) rk_domain->domain.geometry.aperture_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) rk_domain->domain.geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return &rk_domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) err_free_dt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) free_page((unsigned long)rk_domain->dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) err_put_cookie:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) iommu_put_dma_cookie(&rk_domain->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) err_free_domain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) kfree(rk_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) static void rk_iommu_domain_free(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) WARN_ON(!list_empty(&rk_domain->iommus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) for (i = 0; i < NUM_DT_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) u32 dte = rk_domain->dt[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (rk_dte_is_pt_valid(dte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) phys_addr_t pt_phys = rk_dte_pt_address(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) u32 *page_table = phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) dma_unmap_single(dma_dev, pt_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) free_page((unsigned long)page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dma_unmap_single(dma_dev, rk_domain->dt_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) free_page((unsigned long)rk_domain->dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (domain->type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) iommu_put_dma_cookie(&rk_domain->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) kfree(rk_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
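/*
 * Same teardown as rk_iommu_domain_free() above; the only difference is
 * that v2 directory entries encode the page-table address differently,
 * hence rk_dte_pt_address_v2().
 */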
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static void rk_iommu_domain_free_v2(struct iommu_domain *domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) WARN_ON(!list_empty(&rk_domain->iommus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) for (i = 0; i < NUM_DT_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) u32 dte = rk_domain->dt[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (rk_dte_is_pt_valid(dte)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) phys_addr_t pt_phys = rk_dte_pt_address_v2(dte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) u32 *page_table = phys_to_virt(pt_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) dma_unmap_single(dma_dev, pt_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) free_page((unsigned long)page_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) dma_unmap_single(dma_dev, rk_domain->dt_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) SPAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) free_page((unsigned long)rk_domain->dt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (domain->type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) iommu_put_dma_cookie(&rk_domain->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) kfree(rk_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static struct iommu_device *rk_iommu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct rk_iommudata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
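/*
 * Tie the master to its IOMMU with a stateless, runtime-PM-managed device
 * link: runtime-resuming the master also resumes the IOMMU, which restores
 * the MMU state (see rk_iommu_resume()).
 */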
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) data->link = device_link_add(dev, iommu->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) data->defer_attach = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /* Set the max segment size for dev; needed to map large buffers as a single chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (!dev->dma_parms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (!dev->dma_parms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return &iommu->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static void rk_iommu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct rk_iommudata *data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) device_link_del(data->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
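/*
 * All masters behind the same IOMMU instance share the iommu_group that was
 * allocated in rk_iommu_probe(); hand out an extra reference per caller.
 */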
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static struct iommu_group *rk_iommu_device_group(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return iommu_group_ref_get(iommu->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static bool rk_iommu_is_attach_deferred(struct iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct rk_iommudata *data = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return data->defer_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
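/*
 * Called once for each "iommus" phandle on a master's devicetree node,
 * for example (illustrative fragment, node names are made up):
 *
 *     vopb: vop@ff930000 {
 *             iommus = <&vopb_mmu>;
 *     };
 *
 * args->np is the IOMMU node itself and is used here to look up the
 * matching rk_iommu instance for the master.
 */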
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static int rk_iommu_of_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct of_phandle_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) struct platform_device *iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct rk_iommudata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) iommu_dev = of_find_device_by_node(args->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) data->iommu = platform_get_drvdata(iommu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
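/*
 * Display controllers ("vop" in the device name) may still be scanning out
 * a framebuffer set up by the bootloader, so their attach is deferred (see
 * rk_iommu_is_attach_deferred()) until the display driver takes over; this
 * rationale is inferred from the defer_attach handling.
 */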
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (strstr(dev_name(dev), "vop"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) data->defer_attach = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) dev_iommu_priv_set(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) platform_device_put(iommu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) void rockchip_iommu_mask_irq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct rk_iommu *iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) for (i = 0; i < iommu->num_mmu; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) EXPORT_SYMBOL(rockchip_iommu_mask_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) void rockchip_iommu_unmask_irq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct rk_iommu *iommu = rk_iommu_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) for (i = 0; i < iommu->num_mmu; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* Zap the TLB in case mappings were added while the page fault was pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) /* The iommu was kept in the page-fault state until the mapping finished; let it resume now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) EXPORT_SYMBOL(rockchip_iommu_unmask_irq);
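/*
 * Expected calling sequence from a master driver that resolves its own page
 * faults (sketch only; the master-side function name is hypothetical):
 *
 *     rockchip_iommu_mask_irq(master_dev);    quiesce the IOMMU interrupt
 *     fix_up_missing_mapping(master_dev);     master maps the faulting IOVA
 *     rockchip_iommu_unmask_irq(master_dev);  zap TLB, unmask, ack the fault
 */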
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static struct iommu_ops rk_iommu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) .domain_alloc = rk_iommu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) .domain_free = rk_iommu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) .attach_dev = rk_iommu_attach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) .detach_dev = rk_iommu_detach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) .map = rk_iommu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) .unmap = rk_iommu_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) .flush_iotlb_all = rk_iommu_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) .probe_device = rk_iommu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) .release_device = rk_iommu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) .iova_to_phys = rk_iommu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) .is_attach_deferred = rk_iommu_is_attach_deferred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) .device_group = rk_iommu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) .of_xlate = rk_iommu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static struct iommu_ops rk_iommu_ops_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) .domain_alloc = rk_iommu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) .domain_free = rk_iommu_domain_free_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .attach_dev = rk_iommu_attach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .detach_dev = rk_iommu_detach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) .map = rk_iommu_map_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) .unmap = rk_iommu_unmap_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .flush_iotlb_all = rk_iommu_flush_tlb_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .probe_device = rk_iommu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .release_device = rk_iommu_release_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) .iova_to_phys = rk_iommu_iova_to_phys_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) .is_attach_deferred = rk_iommu_is_attach_deferred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) .device_group = rk_iommu_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) .of_xlate = rk_iommu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) static const struct rockchip_iommu_data iommu_data_v1 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) .version = 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static const struct rockchip_iommu_data iommu_data_v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) .version = 0x2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static const struct of_device_id rk_iommu_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) .compatible = "rockchip,iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) .data = &iommu_data_v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) .compatible = "rockchip,iommu-v2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) .data = &iommu_data_v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) static int rk_iommu_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) struct rk_iommu *iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) int num_res = pdev->num_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct rockchip_iommu_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) match = of_match_device(rk_iommu_dt_ids, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) data = (struct rockchip_iommu_data *)match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) iommu->version = data->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) dev_info(dev, "version = %x\n", iommu->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) platform_set_drvdata(pdev, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) iommu->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) iommu->num_mmu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (!iommu->bases)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
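/*
 * One IOMMU device node may expose several identical MMU register banks
 * (one "reg" entry each); every bank is programmed the same way, so simply
 * count how many can be mapped.
 */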
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) for (i = 0; i < num_res; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) res = platform_get_resource(pdev, IORESOURCE_MEM, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (IS_ERR(iommu->bases[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) iommu->num_mmu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (iommu->num_mmu == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return PTR_ERR(iommu->bases[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) iommu->num_irq = platform_irq_count(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (iommu->num_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return iommu->num_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
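/* Optional vendor-specific quirks, all simple boolean devicetree properties. */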
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) iommu->reset_disabled = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) "rockchip,disable-mmu-reset");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) iommu->skip_read = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) "rockchip,skip-mmu-read");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) iommu->dlr_disable = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) "rockchip,disable-device-link-resume");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) iommu->shootdown_entire = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) "rockchip,shootdown-entire");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) iommu->master_handle_irq = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) "rockchip,master-handle-irq");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (of_machine_is_compatible("rockchip,rv1126") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) of_machine_is_compatible("rockchip,rv1109"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) iommu->cmd_retry = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) "rockchip,enable-cmd-retry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) iommu->need_res_map = device_property_read_bool(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) "rockchip,reserve-map");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * iommu clocks should be present for all new devices and devicetrees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * but there are older devicetrees without clocks out in the wild.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * So treat clocks as optional for the time being.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) err = devm_clk_bulk_get_all(dev, &iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (err == -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) iommu->num_clocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) else if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) iommu->num_clocks = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) iommu->group = iommu_group_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (IS_ERR(iommu->group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) err = PTR_ERR(iommu->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) goto err_unprepare_clocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto err_put_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (iommu->version >= 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops_v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) err = iommu_device_register(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) goto err_remove_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * Use the first registered IOMMU device for the domain to use with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * DMA API, since a domain might not physically correspond to a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * IOMMU device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (!dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) dma_dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (iommu->version >= 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) bus_set_iommu(&platform_bus_type, &rk_iommu_ops_v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (iommu->skip_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) goto skip_request_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) for (i = 0; i < iommu->num_irq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) int irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) IRQF_SHARED, dev_name(dev), iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) goto err_remove_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) skip_request_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!res_page && iommu->need_res_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) res_page = __pa_symbol(reserve_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) pr_info("%s,%d, res_page = %pa\n", __func__, __LINE__, &res_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) err_remove_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) iommu_device_sysfs_remove(&iommu->iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) err_put_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) iommu_group_put(iommu->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) err_unprepare_clocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static void rk_iommu_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct rk_iommu *iommu = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
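/*
 * Free the fault IRQs first so the handler cannot fire against a
 * powered-down MMU, then force runtime suspend so the IOMMU is left in a
 * clean state (e.g. across kexec or reboot).
 */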
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) for (i = 0; i < iommu->num_irq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) int irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) devm_free_irq(iommu->dev, irq, iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) pm_runtime_force_suspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) static int __maybe_unused rk_iommu_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct rk_iommu *iommu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (!iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (iommu->dlr_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) rk_iommu_disable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) static int __maybe_unused rk_iommu_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct rk_iommu *iommu = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (!iommu->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (iommu->dlr_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return rk_iommu_enable(iommu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
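/*
 * Runtime PM disables/re-enables the MMUs around power transitions; system
 * sleep reuses those callbacks via pm_runtime_force_{suspend,resume}().
 */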
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) static const struct dev_pm_ops rk_iommu_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static struct platform_driver rk_iommu_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) .probe = rk_iommu_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) .shutdown = rk_iommu_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) .name = "rk_iommu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) .of_match_table = rk_iommu_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) .pm = &rk_iommu_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static int __init rk_iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) return platform_driver_register(&rk_iommu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) subsys_initcall(rk_iommu_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) MODULE_DESCRIPTION("IOMMU API for Rockchip");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) MODULE_ALIAS("platform:rockchip-iommu");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) MODULE_LICENSE("GPL v2");