^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * CPU-agnostic ARM page table allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * ARMv7 Short-descriptor format, supporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * - Basic memory attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * - Simplified access permissions (AP[2:1] model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * - Backwards-compatible TEX remap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * - Large pages/supersections (if indicated by the caller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Not supporting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * - Legacy access permissions (AP[2:0] model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Almost certainly never supporting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * - PXN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * - Domains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * Copyright (C) 2014-2015 ARM Limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * Copyright (c) 2014-2015 MediaTek Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/io-pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/kmemleak.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/barrier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) /* Struct accessors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define io_pgtable_to_data(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) container_of((x), struct arm_v7s_io_pgtable, iop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define io_pgtable_ops_to_data(x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * and 12 bits in a page.
 * MediaTek extends this by 2 bits to reach 34 bits: 14 bits at lvl1 and 8 bits at lvl2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define ARM_V7S_ADDR_BITS 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define _ARM_V7S_LVL_BITS(lvl, cfg) ((lvl) == 1 ? ((cfg)->ias - 20) : 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define ARM_V7S_LVL_SHIFT(lvl) ((lvl) == 1 ? 20 : 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define ARM_V7S_TABLE_SHIFT 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define ARM_V7S_PTES_PER_LVL(lvl, cfg) (1 << _ARM_V7S_LVL_BITS(lvl, cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define ARM_V7S_TABLE_SIZE(lvl, cfg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) (ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define _ARM_V7S_IDX_MASK(lvl, cfg) (ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define ARM_V7S_LVL_IDX(addr, lvl, cfg) ({ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) int _l = lvl; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) ((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * Large page/supersection entries are effectively a block of 16 page/section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * entries, along the lines of the LPAE contiguous hint, but all with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * same output address. For want of a better common name we'll call them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT TLB maintenance) that they represent *one*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define ARM_V7S_CONT_PAGES 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) /* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define ARM_V7S_PTE_TYPE_TABLE 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define ARM_V7S_PTE_TYPE_PAGE 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define ARM_V7S_PTE_IS_TABLE(pte, lvl) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) ((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /* Page table bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define ARM_V7S_ATTR_B BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define ARM_V7S_ATTR_C BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) #define ARM_V7S_ATTR_NS_TABLE BIT(3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define ARM_V7S_ATTR_NS_SECTION BIT(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define ARM_V7S_CONT_SECTION BIT(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define ARM_V7S_CONT_PAGE_XN_SHIFT 15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * The attribute bits are consistently ordered*, but occupy bits [17:10] of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * fields relative to that 8-bit block, plus a total shift relative to the PTE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) #define ARM_V7S_ATTR_SHIFT(lvl) (16 - (lvl) * 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define ARM_V7S_ATTR_MASK 0xff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define ARM_V7S_ATTR_AP0 BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define ARM_V7S_ATTR_AP1 BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define ARM_V7S_ATTR_AP2 BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define ARM_V7S_ATTR_S BIT(6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define ARM_V7S_ATTR_NG BIT(7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define ARM_V7S_TEX_SHIFT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define ARM_V7S_TEX_MASK 0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* MediaTek extend the bits below for PA 32bit/33bit/34bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define ARM_V7S_ATTR_MTK_PA_BIT34 BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /* *well, except for TEX on level 2 large pages, of course :( */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) /* Simplified access permissions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define ARM_V7S_PTE_AF ARM_V7S_ATTR_AP0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #define ARM_V7S_PTE_AP_UNPRIV ARM_V7S_ATTR_AP1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define ARM_V7S_PTE_AP_RDONLY ARM_V7S_ATTR_AP2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /* Register bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define ARM_V7S_RGN_NC 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define ARM_V7S_RGN_WBWA 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define ARM_V7S_RGN_WT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define ARM_V7S_RGN_WB 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) #define ARM_V7S_PRRR_TYPE_DEVICE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define ARM_V7S_PRRR_TYPE_NORMAL 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) #define ARM_V7S_PRRR_TR(n, type) (((type) & 0x3) << ((n) * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) #define ARM_V7S_PRRR_DS0 BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #define ARM_V7S_PRRR_DS1 BIT(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define ARM_V7S_PRRR_NS0 BIT(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) #define ARM_V7S_PRRR_NS1 BIT(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #define ARM_V7S_PRRR_NOS(n) BIT((n) + 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) #define ARM_V7S_NMRR_IR(n, attr) (((attr) & 0x3) << ((n) * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define ARM_V7S_NMRR_OR(n, attr) (((attr) & 0x3) << ((n) * 2 + 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define ARM_V7S_TTBR_S BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define ARM_V7S_TTBR_NOS BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #define ARM_V7S_TTBR_ORGN_ATTR(attr) (((attr) & 0x3) << 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define ARM_V7S_TTBR_IRGN_ATTR(attr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #ifdef CONFIG_ZONE_DMA32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) #define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #define ARM_V7S_TABLE_GFP_DMA GFP_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) typedef u32 arm_v7s_iopte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static bool selftest_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
/* Per-instance state for one v7s page table. */
struct arm_v7s_io_pgtable {
	struct io_pgtable iop;		/* generic handle; io_pgtable_to_data() container_of()s this */

	arm_v7s_iopte *pgd;		/* level-1 table */
	struct kmem_cache *l2_tables;	/* allocator for level-2 tables (see __arm_v7s_alloc_table()) */
	spinlock_t split_lock;		/* NOTE(review): users not visible in this chunk — presumably serialises block splitting */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static dma_addr_t __arm_v7s_dma_addr(void *pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) return (dma_addr_t)virt_to_phys(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (!arm_v7s_is_mtk_enabled(cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) if (paddr & BIT_ULL(32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (paddr & BIT_ULL(33))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (paddr & BIT_ULL(34))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) arm_v7s_iopte mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) phys_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) mask = ARM_V7S_TABLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) else if (arm_v7s_pte_is_cont(pte, lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) mask = ARM_V7S_LVL_MASK(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) paddr = pte & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (!arm_v7s_is_mtk_enabled(cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) return paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) paddr |= BIT_ULL(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) paddr |= BIT_ULL(33);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) paddr |= BIT_ULL(34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) struct arm_v7s_io_pgtable *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
/*
 * Allocate (and, for non-coherent walks, DMA-map) one table at the given
 * level.  Level-1 tables come from the page allocator, level-2 tables
 * from the l2_tables slab cache.  Returns NULL on any failure.
 */
static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_free_pages(
			__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp);

	if (!table)
		return NULL;

	/* A v7s PTE is only 32 bits wide, so the table itself must fit */
	phys = virt_to_phys(table);
	if (phys != (arm_v7s_iopte)phys) {
		/* Doesn't fit in PTE */
		dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
		goto out_free;
	}
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	/*
	 * L2 tables are only referenced by physical address from L1 PTEs,
	 * which kmemleak cannot see — tell it not to report them.
	 */
	if (lvl == 2)
		kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) static void __arm_v7s_free_table(void *table, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) struct arm_v7s_io_pgtable *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) struct io_pgtable_cfg *cfg = &data->iop.cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct device *dev = cfg->iommu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) if (!cfg->coherent_walk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) if (lvl == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) free_pages((unsigned long)table, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) kmem_cache_free(data->l2_tables, table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (cfg->coherent_walk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) num_entries * sizeof(*ptep), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) int num_entries, struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) for (i = 0; i < num_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) ptep[i] = pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) __arm_v7s_pte_sync(ptep, num_entries, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
/*
 * Build the attribute/permission portion of a leaf PTE from IOMMU_* prot
 * flags; the caller ORs in the output address (see arm_v7s_init_pte()).
 */
static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	/* Non-MMIO mappings get TEX = 1; MMIO leaves TEX at 0 */
	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		/* Simplified AP[2:1] model, with AF (AP0) always set */
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	/* Shift the 8-bit attribute group to its level-dependent position */
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	/* XN and B/C live outside the shifted group, so set them afterwards */
	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	return pte;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) int prot = IOMMU_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) if (!(attr & ARM_V7S_PTE_AP_RDONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) prot |= IOMMU_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) prot |= IOMMU_PRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) prot |= IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) else if (pte & ARM_V7S_ATTR_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) prot |= IOMMU_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (pte & ARM_V7S_ATTR_XN(lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) prot |= IOMMU_NOEXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
/*
 * Convert a page/section PTE into its "contiguous" (large page /
 * supersection) equivalent.
 */
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		/* A supersection is just a section with one extra bit set */
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		/* Large pages relocate XN and TEX and use different type bits */
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		/*
		 * The XOR clears xn, tex and the PAGE type bit (all known to
		 * be set in pte exactly as extracted above); the OR then
		 * re-inserts XN/TEX at their large-page positions along with
		 * the CONT_PAGE type.
		 */
		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
/*
 * Convert a "contiguous" (large page / supersection) PTE back into its
 * single page/section equivalent — the inverse of arm_v7s_pte_to_cont().
 */
static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		/* Extract XN and TEX from their large-page positions */
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		/*
		 * The XOR clears the extracted bits and the CONT_PAGE type;
		 * the OR moves XN/TEX back to their small-page positions and
		 * sets the ordinary PAGE type.
		 */
		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) return pte & ARM_V7S_CONT_SECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) else if (lvl == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return !(pte & ARM_V7S_PTE_TYPE_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) struct iommu_iotlb_gather *, unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) size_t, int, arm_v7s_iopte *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
/*
 * Install num_entries identical leaf entries at *ptep for a mapping of
 * paddr at iova.  num_entries > 1 produces a "contiguous" entry.
 * Returns 0, -EINVAL if tearing down an old sub-table fails, or -EEXIST
 * if a leaf entry is already present (caller must unmap first).
 */
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			/* Rewind ptep to the base of this level's table */
			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	/* Combine attributes with the encoded output address and publish */
	pte |= paddr_to_iopte(paddr, lvl, cfg);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) arm_v7s_iopte *ptep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) arm_v7s_iopte curr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct io_pgtable_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) arm_v7s_iopte old, new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) new |= ARM_V7S_ATTR_NS_TABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * Ensure the table itself is visible before its PTE can be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * Whilst we could get away with cmpxchg64_release below, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * doesn't have any ordering semantics when !CONFIG_SMP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) old = cmpxchg_relaxed(ptep, curr, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) __arm_v7s_pte_sync(ptep, 1, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) return old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) phys_addr_t paddr, size_t size, int prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) struct io_pgtable_cfg *cfg = &data->iop.cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) arm_v7s_iopte pte, *cptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) /* Find our entry at the current level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) /* If we can install a leaf entry at this level, then do so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return arm_v7s_init_pte(data, iova, paddr, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) lvl, num_entries, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /* We can't allocate tables at the final level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) if (WARN_ON(lvl == 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* Grab a pointer to the next level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) pte = READ_ONCE(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (!pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (!cptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (pte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) __arm_v7s_free_table(cptep, lvl + 1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) /* We've no easy way of knowing if it's synced yet, so... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) __arm_v7s_pte_sync(ptep, 1, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) cptep = iopte_deref(pte, lvl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) } else if (pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* We require an unmap first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) WARN_ON(!selftest_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /* Rinse, repeat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) phys_addr_t paddr, size_t pgsize, size_t pgcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) int prot, gfp_t gfp, size_t *mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) /* If no access, then nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) paddr >= (1ULL << data->iop.cfg.oas)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) while (pgcount--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) iova += pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) paddr += pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) *mapped += pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * Synchronise all PTE updates for the new mapping before there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * a chance for anything to kick off a table walk for the new iova.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) static void arm_v7s_free_pgtable(struct io_pgtable *iop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) arm_v7s_iopte pte = data->pgd[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) if (ARM_V7S_PTE_IS_TABLE(pte, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) __arm_v7s_free_table(iopte_deref(pte, 1, data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) __arm_v7s_free_table(data->pgd, 1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) kmem_cache_destroy(data->l2_tables);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) unsigned long iova, int idx, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) arm_v7s_iopte *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) struct io_pgtable *iop = &data->iop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) arm_v7s_iopte pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) size_t size = ARM_V7S_BLOCK_SIZE(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Check that we didn't lose a race to get the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) pte = *ptep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (!arm_v7s_pte_is_cont(pte, lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) pte = arm_v7s_cont_to_pte(pte, lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) ptep[i] = pte + i * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) size *= ARM_V7S_CONT_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) io_pgtable_tlb_flush_walk(iop, iova, size, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct iommu_iotlb_gather *gather,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) arm_v7s_iopte blk_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) arm_v7s_iopte *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct io_pgtable_cfg *cfg = &data->iop.cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) arm_v7s_iopte pte, *tablep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) int i, unmap_idx, num_entries, num_ptes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (!tablep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return 0; /* Bytes unmapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) num_entries = size >> ARM_V7S_LVL_SHIFT(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (num_entries > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) pte = arm_v7s_pte_to_cont(pte, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) for (i = 0; i < num_ptes; i += num_entries, pte += size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /* Unmap! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (i == unmap_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) __arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (pte != blk_pte) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) __arm_v7s_free_table(tablep, 2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) tablep = iopte_deref(pte, 1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct iommu_iotlb_gather *gather,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) unsigned long iova, size_t size, int lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) arm_v7s_iopte *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) struct io_pgtable *iop = &data->iop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) /* Something went horribly wrong and we ran out of page table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (WARN_ON(lvl > 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ptep += idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) pte[i] = READ_ONCE(ptep[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) } while (++i < num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * If we've hit a contiguous 'large page' entry at this level, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * needs splitting first, unless we're unmapping the whole lot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * For splitting, we can't rewrite 16 PTEs atomically, and since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * can't necessarily assume TEX remap we don't have a software bit to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * mark live entries being split. In practice (i.e. DMA API code), we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * will never be splitting large pages anyway, so just wrap this edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * case in a lock for the sake of correctness and be done with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) spin_lock_irqsave(&data->split_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) spin_unlock_irqrestore(&data->split_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) /* If the size matches this level, we're in the right place */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (num_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) __arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) for (i = 0; i < num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /* Also flush any partial walks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) io_pgtable_tlb_flush_walk(iop, iova, blk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) ARM_V7S_BLOCK_SIZE(lvl + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) ptep = iopte_deref(pte[i], lvl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) __arm_v7s_free_table(ptep, lvl + 1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * Order the PTE update against queueing the IOVA, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * guarantee that a flush callback from a different CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * has observed it before the TLBIALL can be issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) iova += blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) } else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * Insert a table at the next level to map the old region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * minus the part we want to unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) /* Keep on walkin' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ptep = iopte_deref(pte[0], lvl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) size_t pgsize, size_t pgcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) size_t unmapped = 0, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) while (pgcount--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) unmapped += pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) iova += pgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return unmapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) size_t size, struct iommu_iotlb_gather *gather)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return arm_v7s_unmap_pages(ops, iova, size, 1, gather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) unsigned long iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) arm_v7s_iopte *ptep = data->pgd, pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int lvl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) pte = READ_ONCE(*ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) ptep = iopte_deref(pte, lvl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!ARM_V7S_PTE_IS_VALID(pte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) mask = ARM_V7S_LVL_MASK(lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (arm_v7s_pte_is_cont(pte, lvl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) mask *= ARM_V7S_CONT_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct arm_v7s_io_pgtable *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) IO_PGTABLE_QUIRK_NO_PERMS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) IO_PGTABLE_QUIRK_ARM_MTK_EXT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) IO_PGTABLE_QUIRK_NON_STRICT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) data = kmalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) spin_lock_init(&data->split_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ARM_V7S_TABLE_SIZE(2, cfg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ARM_V7S_TABLE_SIZE(2, cfg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ARM_V7S_TABLE_SLAB_FLAGS, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (!data->l2_tables)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto out_free_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) data->iop.ops = (struct io_pgtable_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) .map = arm_v7s_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) .map_pages = arm_v7s_map_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) .unmap = arm_v7s_unmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) .unmap_pages = arm_v7s_unmap_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) .iova_to_phys = arm_v7s_iova_to_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* We have to do this early for __arm_v7s_alloc_table to work... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) data->iop.cfg = *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * Unless the IOMMU driver indicates supersection support by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * having SZ_16M set in the initial bitmap, they won't be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* TCR: T0SZ=0, EAE=0 (if applicable) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cfg->arm_v7s_cfg.tcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * TEX remap: the indices used map to the closest equivalent types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * under the non-TEX-remap interpretation of those attribute bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * excepting various implementation-defined aspects of shareability.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Looking good; allocate a pgd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!data->pgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto out_free_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* Ensure the empty pgd is visible before any actual TTBR write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* TTBR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) (cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return &data->iop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) out_free_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) kmem_cache_destroy(data->l2_tables);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) .alloc = arm_v7s_alloc_pgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) .free = arm_v7s_free_pgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static struct io_pgtable_cfg *cfg_cookie __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static void __init dummy_tlb_flush_all(void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) WARN_ON(cookie != cfg_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static void __init dummy_tlb_flush(unsigned long iova, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) size_t granule, void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) WARN_ON(cookie != cfg_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) unsigned long iova, size_t granule,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) void *cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) dummy_tlb_flush(iova, granule, granule, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) .tlb_flush_all = dummy_tlb_flush_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) .tlb_flush_walk = dummy_tlb_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) .tlb_add_page = dummy_tlb_add_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) #define __FAIL(ops) ({ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) WARN(1, "selftest: test failed\n"); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) selftest_running = false; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) -EFAULT; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int __init arm_v7s_do_selftests(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct io_pgtable_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct io_pgtable_cfg cfg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .tlb = &dummy_tlb_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .oas = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) .ias = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .coherent_walk = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) .quirks = IO_PGTABLE_QUIRK_ARM_NS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned int iova, size, iova_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) unsigned int i, loopnr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) selftest_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) cfg_cookie = &cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) pr_err("selftest: failed to allocate io pgtable ops\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Initial sanity checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * Empty page tables shouldn't provide any translations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (ops->iova_to_phys(ops, 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (ops->iova_to_phys(ops, SZ_1G + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (ops->iova_to_phys(ops, SZ_2G + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Distinct mappings of different granule sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) iova = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) size = 1UL << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (ops->map(ops, iova, iova, size, IOMMU_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) IOMMU_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) IOMMU_NOEXEC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) IOMMU_CACHE, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* Overlapping mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!ops->map(ops, iova, iova + size, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) iova += SZ_16M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) loopnr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* Partial unmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) size = 1UL << __ffs(cfg.pgsize_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) while (i < loopnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) iova_start = i * SZ_16M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (ops->unmap(ops, iova_start + size, size, NULL) != size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Remap of partial unmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (ops->iova_to_phys(ops, iova_start + size + 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) != (size + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Full unmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) iova = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) size = 1UL << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (ops->unmap(ops, iova, size, NULL) != size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (ops->iova_to_phys(ops, iova + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* Remap full block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return __FAIL(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) iova += SZ_16M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) free_io_pgtable_ops(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) selftest_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) pr_info("self test ok\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) subsys_initcall(arm_v7s_do_selftests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #endif