// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

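/*
 * Entry type is encoded in the two least significant bits of each page table
 * entry, as the macros below show:
 *   lv1: 00/11 = fault, 01 = link to a lv2 table, 10 = 1MiB section
 *   lv2: 00 = fault, 01 = 64KiB large page, 1x = 4KiB small page
 */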
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

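/*
 * From the constants above: 4096 lv1 entries of 1MiB each cover a full 4GiB
 * IOVA space, and each lv2 table maps one 1MiB section with 256 4KiB entries
 * (1KiB per table).
 */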
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0

#define CFG_LRU 0x1
#define CFG_EAP (1 << 2)
#define CFG_QOS(n) ((n & 0xF) << 7)
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

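/*
 * MMU_VERSION carries an 11-bit version field in bits 31:21 of the register;
 * within that field the major version occupies the bits above bit 7 and the
 * minor version the low 7 bits, as extracted by the helpers below.
 */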
#define MMU_MAJ_VER(val) ((val) >> 7)
#define MMU_MIN_VER(val) ((val) & 0x7F)
#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))

/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C

#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
#define REG_V5_MMU_FLUSH_RANGE 0x018
#define REG_V5_MMU_FLUSH_START 0x020
#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
#define REG_V5_FAULT_AW_VA 0x080

#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
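/*
 * Unmapped lv1 slots are not left as plain fault entries: ZERO_LV2LINK points
 * them at a shared, zero-filled lv2 table instead (see the System MMU v3.3
 * FLPD cache workaround further below), which is why the lv1ent_fault() and
 * lv1ent_page() macros above special-case ZERO_LV2LINK.
 */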

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device on
 * device add. It contains a list of the SYSMMU controllers defined by the
 * device tree which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain; /* domain this device is attached to */
	struct mutex rpm_lock; /* for runtime pm of all sysmmus */
};

/*
 * This structure is an exynos-specific generalization of struct iommu_domain.
 * It contains a list of the SYSMMU controllers from all master devices which
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;	/* SYSMMU controller device */
	struct device *master;	/* master device (owner) */
	struct device_link *link; /* runtime PM link to master */
	void __iomem *sfrbase;	/* our registers */
	struct clk *clk;	/* SYSMMU's clock */
	struct clk *aclk;	/* SYSMMU's aclk clock */
	struct clk *pclk;	/* SYSMMU's pclk clock */
	struct clk *clk_master;	/* master's device clock */
	spinlock_t lock;	/* lock for modifying state */
	bool active;		/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node; /* node for domain clients list */
	struct list_head owner_node; /* node for owner controllers list */
	phys_addr_t pgtable;	/* assigned page table structure */
	unsigned int version;	/* our version */

	struct iommu_device iommu; /* IOMMU core handle */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

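/*
 * CTRL_BLOCK stalls the SYSMMU so that its TLB state can be changed safely:
 * sysmmu_block() polls bit 0 of MMU_STATUS a bounded number of times and
 * reports whether the block actually took effect, while sysmmu_unblock()
 * resumes translation by re-enabling the MMU.
 */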
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
				data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
				data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
				data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
				data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
			data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

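/*
 * Faults are treated as fatal unless a fault handler registered on the domain
 * reports them as recovered: the handler below ends in BUG_ON() both for
 * unknown fault types and for unhandled faults.
 */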
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is in a blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a short
	 * time, while accessing the registers. For performing address
	 * translation during a DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

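/*
 * Controllers that cache first-level page descriptors (FLPD, enabled via
 * CFG_FLPDCACHE) must also drop the stale cached descriptor when a lv1 entry
 * changes. The helper below does that for v3.3+ controllers, falling back to
 * a full TLB flush on v5 hardware.
 */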
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets.
		 * A 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler for irq %d\n", irq);
		return ret;
	}

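	/*
	 * Clock setup differs between SoC generations: each clock below is
	 * optional on its own, but either the single "sysmmu" gate clock or
	 * the "aclk"/"pclk" pair must be present; the "master" clock is
	 * fully optional.
	 */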
	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe = exynos_sysmmu_probe,
	.driver = {
		.name = "exynos-sysmmu",
		.of_match_table = sysmmu_of_match,
		.pm = &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

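/*
 * Page-table entries live in normal CPU memory, so every update has to be
 * made visible to the SYSMMU through the streaming DMA API: sync the entry
 * for the CPU, store the new value, then sync it back for the device.
 */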
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

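/*
 * Domain allocation grabs an order-2 page block for the 16KiB lv1 table and
 * an order-1 block for the per-section lv2 entry counters, then maps the lv1
 * table for DMA once; later entry updates are synced via exynos_iommu_set_pte().
 */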
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct exynos_iommu_domain *domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) dma_addr_t handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /* Check if correct PTE offsets are initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) domain = kzalloc(sizeof(*domain), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (!domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (type == IOMMU_DOMAIN_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (iommu_get_dma_cookie(&domain->domain) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) goto err_pgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) } else if (type != IOMMU_DOMAIN_UNMANAGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) goto err_pgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (!domain->pgtable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) goto err_dma_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (!domain->lv2entcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) goto err_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) for (i = 0; i < NUM_LV1ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) domain->pgtable[i] = ZERO_LV2LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* For mapping page table entries we rely on dma == phys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) BUG_ON(handle != virt_to_phys(domain->pgtable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (dma_mapping_error(dma_dev, handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) goto err_lv2ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) spin_lock_init(&domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) spin_lock_init(&domain->pgtablelock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) INIT_LIST_HEAD(&domain->clients);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) domain->domain.geometry.aperture_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) domain->domain.geometry.aperture_end = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) domain->domain.geometry.force_aperture = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return &domain->domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) err_lv2ent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) free_pages((unsigned long)domain->lv2entcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) err_counter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) free_pages((unsigned long)domain->pgtable, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) err_dma_cookie:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) iommu_put_dma_cookie(&domain->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) err_pgtable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) kfree(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
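
#if 0
/*
 * Illustrative sketch only, not part of this driver: roughly how a master
 * device driver of this kernel generation would exercise the callbacks above
 * through the generic IOMMU API (iommu_domain_alloc() per bus,
 * iommu_attach_device(), iommu_map()). The function name and the addresses
 * are hypothetical.
 */
static int example_use_unmanaged_domain(struct device *master)
{
	struct iommu_domain *dom;
	int ret;

	/* ends up in exynos_iommu_domain_alloc(IOMMU_DOMAIN_UNMANAGED) */
	dom = iommu_domain_alloc(&platform_bus_type);
	if (!dom)
		return -ENOMEM;

	/* ends up in exynos_iommu_attach_device() */
	ret = iommu_attach_device(dom, master);
	if (ret)
		goto free_domain;

	/* ends up in exynos_iommu_map(); one 1 MiB section mapping */
	ret = iommu_map(dom, 0x20000000, 0x40000000, SECT_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	return 0;

detach:
	iommu_detach_device(dom, master);
free_domain:
	iommu_domain_free(dom);
	return ret;
}
#endif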
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct sysmmu_drvdata *data, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) WARN_ON(!list_empty(&domain->clients));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) spin_lock_irqsave(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) spin_lock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) __sysmmu_disable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) data->pgtable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) data->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) list_del_init(&data->domain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) spin_unlock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) spin_unlock_irqrestore(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (iommu_domain->type == IOMMU_DOMAIN_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) iommu_put_dma_cookie(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) for (i = 0; i < NUM_LV1ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (lv1ent_page(domain->pgtable + i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) phys_addr_t base = lv2table_base(domain->pgtable + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) kmem_cache_free(lv2table_kmem_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) phys_to_virt(base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) free_pages((unsigned long)domain->pgtable, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) free_pages((unsigned long)domain->lv2entcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) kfree(domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) phys_addr_t pagetable = virt_to_phys(domain->pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct sysmmu_drvdata *data, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!has_sysmmu(dev) || owner->domain != iommu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) mutex_lock(&owner->rpm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) list_for_each_entry(data, &owner->controllers, owner_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) pm_runtime_get_noresume(data->sysmmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (pm_runtime_active(data->sysmmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) __sysmmu_disable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) pm_runtime_put(data->sysmmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) spin_lock_irqsave(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_lock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) data->pgtable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) data->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) list_del_init(&data->domain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) spin_unlock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) owner->domain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) spin_unlock_irqrestore(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) mutex_unlock(&owner->rpm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) &pagetable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct sysmmu_drvdata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) phys_addr_t pagetable = virt_to_phys(domain->pgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!has_sysmmu(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (owner->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) exynos_iommu_detach_device(owner->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) mutex_lock(&owner->rpm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) spin_lock_irqsave(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) list_for_each_entry(data, &owner->controllers, owner_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) spin_lock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) data->pgtable = pagetable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) data->domain = domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) list_add_tail(&data->domain_node, &domain->clients);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) spin_unlock(&data->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) owner->domain = iommu_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) spin_unlock_irqrestore(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) list_for_each_entry(data, &owner->controllers, owner_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) pm_runtime_get_noresume(data->sysmmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (pm_runtime_active(data->sysmmu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) __sysmmu_enable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) pm_runtime_put(data->sysmmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) mutex_unlock(&owner->rpm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) &pagetable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
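/*
 * Make sure the level-1 entry @sent points to a level-2 page table for @iova,
 * allocating, zero-initialising and DMA-mapping a new one if the entry is
 * still a fault (or zero_l2_table) link. Returns the level-2 entry for @iova
 * on success or an ERR_PTR() on failure. Called with domain->pgtablelock
 * held.
 */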
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (lv1ent_section(sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return ERR_PTR(-EADDRINUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (lv1ent_fault(sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) dma_addr_t handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) sysmmu_pte_t *pent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) bool need_flush_flpd_cache = lv1ent_zero(sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (!pent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kmemleak_ignore(pent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) *pgcounter = NUM_LV2ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (dma_mapping_error(dma_dev, handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) kmem_cache_free(lv2table_kmem_cache, pent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return ERR_PTR(-EADDRINUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
		/*
		 * If the pre-fetched SLPD was the fault SLPD in zero_l2_table,
		 * the FLPD cache may hold the address of zero_l2_table. The
		 * code above has just replaced that zero_l2_table link with a
		 * new L2 page table so that valid mappings can be written,
		 * but accesses to the now-valid area could still fault while
		 * the FLPD cache keeps pointing at zero_l2_table instead of
		 * the new L2 page table.
		 * Therefore, on System MMU v3.3, any replacement of
		 * zero_l2_table with a valid L2 page table must be followed
		 * by an FLPD cache invalidation.
		 * The FLPD cache is invalidated together with a TLB
		 * invalidation by VPN, without blocking; this is safe because
		 * the target address of the invalidation is not mapped yet.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (need_flush_flpd_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct sysmmu_drvdata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) spin_lock(&domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) list_for_each_entry(data, &domain->clients, domain_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sysmmu_tlb_invalidate_flpdcache(data, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) spin_unlock(&domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return page_entry(sent, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static int lv1set_section(struct exynos_iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) sysmmu_pte_t *sent, sysmmu_iova_t iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) phys_addr_t paddr, int prot, short *pgcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (lv1ent_section(sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (lv1ent_page(sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (*pgcnt != NUM_LV2ENTRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) *pgcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) spin_lock(&domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (lv1ent_page_zero(sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct sysmmu_drvdata *data;
		/*
		 * System MMU v3.3 may have cached an FLPD entry for this
		 * address by speculatively prefetching an SLPD that had no
		 * mapping, so flush the FLPD cache here.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) list_for_each_entry(data, &domain->clients, domain_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) sysmmu_tlb_invalidate_flpdcache(data, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) spin_unlock(&domain->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
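/*
 * Write level-2 entries for a single 4 KiB small page or for one 64 KiB
 * large page (SPAGES_PER_LPAGE identical entries). The large-page path
 * batches the cache maintenance: one sync-for-cpu before and one
 * sync-for-device after writing the whole run, instead of syncing each of
 * the 16 entries separately via exynos_iommu_set_pte().
 */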
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) int prot, short *pgcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (size == SPAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (WARN_ON(!lv2ent_fault(pent)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) *pgcnt -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) } else { /* size == LPAGE_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dma_addr_t pent_base = virt_to_phys(pent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dma_sync_single_for_cpu(dma_dev, pent_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sizeof(*pent) * SPAGES_PER_LPAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (WARN_ON(!lv2ent_fault(pent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) memset(pent - i, 0, sizeof(*pent) * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) *pent = mk_lv2ent_lpage(paddr, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dma_sync_single_for_device(dma_dev, pent_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) sizeof(*pent) * SPAGES_PER_LPAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) *pgcnt -= SPAGES_PER_LPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic that improves address translation
 * performance by caching more page table entries from a page table walk.
 * However, the logic has a bug: once a faulty page table entry has been
 * cached, System MMU keeps reporting a page fault when that cached entry is
 * hit, even after the entry has been updated to a valid one in memory.
 * To keep such faulty entries out of the cache, the I/O virtual memory
 * manager must apply the workaround described below (an illustrative helper
 * follows exynos_iommu_map()).
 *
 * Any two consecutive I/O virtual address regions must be separated by a
 * hole, and region start addresses must be aligned, to prevent this
 * misbehavior of System MMU 3.x (workaround for the h/w bug).
 *
 * Precisely, the start address of every I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs further workarounds:
 * - Any two consecutive I/O virtual regions must be separated by a hole of
 *   at least 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int exynos_iommu_map(struct iommu_domain *iommu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned long l_iova, phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) sysmmu_pte_t *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) BUG_ON(domain->pgtable == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) prot &= SYSMMU_SUPPORTED_PROT_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) spin_lock_irqsave(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) entry = section_entry(domain->pgtable, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (size == SECT_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ret = lv1set_section(domain, entry, iova, paddr, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) &domain->lv2entcnt[lv1ent_offset(iova)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) sysmmu_pte_t *pent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) pent = alloc_lv2entry(domain, entry, iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) &domain->lv2entcnt[lv1ent_offset(iova)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (IS_ERR(pent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) ret = PTR_ERR(pent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = lv2set_page(pent, paddr, size, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) &domain->lv2entcnt[lv1ent_offset(iova)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) __func__, ret, size, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) spin_unlock_irqrestore(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
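
#if 0
/*
 * Illustrative sketch only, not used by this driver: one way an I/O virtual
 * memory manager could satisfy the System MMU v3.3 workaround described in
 * the comment above exynos_iommu_map(). The helper name is hypothetical;
 * ALIGN() and SZ_128K are the usual kernel macros.
 */
static unsigned long example_next_region_start(unsigned long prev_region_end)
{
	/*
	 * Leave a hole of at least 128 KiB after the previous region and
	 * start the next one on a 128 KiB boundary.
	 */
	return ALIGN(prev_region_end + SZ_128K, SZ_128K);
}
#endif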
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) sysmmu_iova_t iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct sysmmu_drvdata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) spin_lock_irqsave(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) list_for_each_entry(data, &domain->clients, domain_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) sysmmu_tlb_invalidate_entry(data, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) spin_unlock_irqrestore(&domain->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned long l_iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sysmmu_pte_t *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) size_t err_pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) BUG_ON(domain->pgtable == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) spin_lock_irqsave(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ent = section_entry(domain->pgtable, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (lv1ent_section(ent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (WARN_ON(size < SECT_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) err_pgsize = SECT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
		/*
		 * Workaround for h/w bug in System MMU v3.3: clear the entry
		 * to ZERO_LV2LINK rather than to a plain fault entry.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) exynos_iommu_set_pte(ent, ZERO_LV2LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) size = SECT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (unlikely(lv1ent_fault(ent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (size > SECT_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) size = SECT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
	/* lv1ent_page(ent) == true here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ent = page_entry(ent, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (unlikely(lv2ent_fault(ent))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) size = SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (lv2ent_small(ent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) exynos_iommu_set_pte(ent, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) size = SPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) domain->lv2entcnt[lv1ent_offset(iova)] += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
	/* lv2ent_large(ent) == true here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (WARN_ON(size < LPAGE_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) err_pgsize = LPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) sizeof(*ent) * SPAGES_PER_LPAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) sizeof(*ent) * SPAGES_PER_LPAGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) size = LPAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) spin_unlock_irqrestore(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) exynos_iommu_tlb_invalidate_entry(domain, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) spin_unlock_irqrestore(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) __func__, size, iova, err_pgsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
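/*
 * Worked example of the two-level lookup that exynos_iommu_iova_to_phys()
 * performs below, using the SECT/SPAGE orders defined at the top of this
 * file (illustrative numbers): for iova = 0x12345678,
 *   level-1 (section) index:  0x12345678 >> 20              = 0x123
 *   section offset:           0x12345678 & (SECT_SIZE - 1)  = 0x45678
 *   level-2 (page) index:     (0x12345678 & 0xFFFFF) >> 12  = 0x45
 *   small-page offset:        0x12345678 & (SPAGE_SIZE - 1) = 0x678
 * section_entry()/page_entry() and the *_phys()/*_offs() helpers used below
 * implement exactly this split.
 */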
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) dma_addr_t iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) sysmmu_pte_t *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) phys_addr_t phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) spin_lock_irqsave(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) entry = section_entry(domain->pgtable, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (lv1ent_section(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) phys = section_phys(entry) + section_offs(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) } else if (lv1ent_page(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) entry = page_entry(entry, iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (lv2ent_large(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) phys = lpage_phys(entry) + lpage_offs(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) else if (lv2ent_small(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) phys = spage_phys(entry) + spage_offs(iova);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) spin_unlock_irqrestore(&domain->pgtablelock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct sysmmu_drvdata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!has_sysmmu(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) list_for_each_entry(data, &owner->controllers, owner_node) {
		/*
		 * The SYSMMU is runtime-activated through a device link
		 * (dependency) to its master device, so this driver does not
		 * have to manage the SYSMMU's runtime PM state by itself.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) data->link = device_link_add(dev, data->sysmmu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) DL_FLAG_STATELESS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) DL_FLAG_PM_RUNTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* There is always at least one entry, see exynos_iommu_of_xlate() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) data = list_first_entry(&owner->controllers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct sysmmu_drvdata, owner_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return &data->iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static void exynos_iommu_release_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct sysmmu_drvdata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (!has_sysmmu(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (owner->domain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct iommu_group *group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) WARN_ON(owner->domain !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) iommu_group_default_domain(group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) exynos_iommu_detach_device(owner->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) iommu_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) list_for_each_entry(data, &owner->controllers, owner_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) device_link_del(data->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
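/*
 * Called by the IOMMU core once for every "iommus" phandle of a master
 * device: binds the referenced SYSMMU instance to that master by adding it
 * to the owner->controllers list, allocating the owner structure on first
 * use.
 */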
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static int exynos_iommu_of_xlate(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct of_phandle_args *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct platform_device *sysmmu = of_find_device_by_node(spec->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct sysmmu_drvdata *data, *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!sysmmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) data = platform_get_drvdata(sysmmu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) put_device(&sysmmu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (!owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) owner = kzalloc(sizeof(*owner), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (!owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) put_device(&sysmmu->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) INIT_LIST_HEAD(&owner->controllers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) mutex_init(&owner->rpm_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) dev_iommu_priv_set(dev, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) list_for_each_entry(entry, &owner->controllers, owner_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (entry == data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) list_add_tail(&data->owner_node, &owner->controllers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) data->master = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static const struct iommu_ops exynos_iommu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) .domain_alloc = exynos_iommu_domain_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .domain_free = exynos_iommu_domain_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) .attach_dev = exynos_iommu_attach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) .detach_dev = exynos_iommu_detach_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) .map = exynos_iommu_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) .unmap = exynos_iommu_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) .iova_to_phys = exynos_iommu_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .device_group = generic_device_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) .probe_device = exynos_iommu_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) .release_device = exynos_iommu_release_device,
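	/* 4 KiB small pages, 64 KiB large pages and 1 MiB sections */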
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) .of_xlate = exynos_iommu_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int __init exynos_iommu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) np = of_find_matching_node(NULL, sysmmu_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (!lv2table_kmem_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) pr_err("%s: Failed to create kmem cache\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ret = platform_driver_register(&exynos_sysmmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) pr_err("%s: Failed to register driver\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) goto err_reg_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (zero_lv2_table == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) pr_err("%s: Failed to allocate zero level2 page table\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) goto err_zero_lv2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) pr_err("%s: Failed to register exynos-iommu driver.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) goto err_set_iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) err_set_iommu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) err_zero_lv2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) platform_driver_unregister(&exynos_sysmmu_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) err_reg_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) kmem_cache_destroy(lv2table_kmem_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) core_initcall(exynos_iommu_init);