/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/smp.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif
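
/*
 * Note: TCR_EL1.NFD1 makes translation table walks via TTBR1 fault for
 * certain unprivileged accesses (e.g. SVE first-fault loads), the intent
 * being to stop EL0 from probing the randomized kernel VA layout; see the
 * TCR_EL1.NFD1 description in the Arm ARM for the exact semantics.
 */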

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS 0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#endif
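
/*
 * Note: TBI1 enables top-byte-ignore for TTBR1 (kernel) addresses, which is
 * what lets KASAN_SW_TAGS and MTE keep a tag in bits [63:56] of a pointer;
 * TBID1 confines that to data accesses, so instruction fetches still use the
 * full address. SYS_TCR_EL1_TCMA1 additionally exempts accesses made with the
 * match-all tag from tag checking.
 */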

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during __cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_iNC_oWB, MT_NORMAL_iNC_oWB))
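
/*
 * Note: MAIR_EL1 is simply eight 8-bit attribute fields; MAIR_ATTRIDX(attr, idx)
 * places the encoding 'attr' in byte 'idx', and the AttrIndx field of each
 * page-table entry selects one of those bytes. That is why __cpu_setup below
 * can retarget MT_NORMAL_TAGGED with a single 8-bit bfi into this value.
 */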

#ifdef CONFIG_CPU_PM
/**
 * cpu_do_suspend - save CPU register context
 *
 * x0: virtual address of the context buffer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x2, tpidr_el0
	mrs	x3, tpidrro_el0
	mrs	x4, contextidr_el1
	mrs	x5, osdlr_el1
	mrs	x6, cpacr_el1
	mrs	x7, tcr_el1
	mrs	x8, vbar_el1
	mrs	x9, mdscr_el1
	mrs	x10, oslsr_el1
	mrs	x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x12, tpidr_el1
alternative_else
	mrs	x12, tpidr_el2
alternative_endif
	mrs	x13, sp_el0
	stp	x2, x3, [x0]
	stp	x4, x5, [x0, #16]
	stp	x6, x7, [x0, #32]
	stp	x8, x9, [x0, #48]
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)
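
/*
 * For reference, the layout written above and consumed by cpu_do_resume is
 * the one expected by struct cpu_suspend_ctx in <asm/suspend.h>; roughly
 * (field names assumed from that header, shown only as an illustration):
 *
 *	struct cpu_suspend_ctx {
 *		u64 ctx_regs[NR_CTX_REGS];	// the 13 values saved above
 *		u64 sp;
 *	} __aligned(16);
 */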

/**
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of the saved context buffer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	tpidr_el0, x2
	msr	tpidrro_el0, x3
	msr	contextidr_el1, x4
	msr	cpacr_el1, x6

	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

	msr	tcr_el1, x8
	msr	vbar_el1, x9

	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10

	msr	sctlr_el1, x12
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, x13
alternative_else
	msr	tpidr_el2, x13
alternative_endif
	msr	sp_el0, x14
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	msr	osdlr_el1, x5
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif

	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection
#endif

	.pushsection ".idmap.text", "awx"

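/*
 * __idmap_cpu_set_reserved_ttbr1 - point TTBR1 at the empty reserved_pg_dir
 * and invalidate the TLBs, so that the swapper page tables can be rewritten
 * or replaced while this CPU keeps executing from the identity map (TTBR0).
 */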
	.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1
	dsb	nsh
	isb
	.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_FUNC_START(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2

	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	.pushsection ".idmap.text", "awx"

	.macro	__idmap_kpti_get_pgtable_ent, type
	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
	dmb	sy				// lines are written back before
	ldr	\type, [cur_\()\type\()p]	// loading the entry
	tbz	\type, #0, skip_\()\type	// Skip invalid and
	tbnz	\type, #11, skip_\()\type	// non-global entries
	.endm

	.macro __idmap_kpti_put_pgtable_ent_ng, type
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\()\type\()p]	// Update the entry and ensure
	dmb	sy				// that it is visible to all
	dc	civac, cur_\()\type\()p		// CPUs.
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
__idmap_kpti_flag:
	.long	1
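
/*
 * Synchronization here is via __idmap_kpti_flag: it starts at 1 (counting the
 * boot CPU). Each secondary switches to reserved_pg_dir, atomically increments
 * the flag and then spins until it reads zero. The boot CPU (cpu == 0) waits
 * until the flag equals num_cpus, turns its MMU off so it can walk swapper via
 * the physical address in swapper_pa, marks every valid global entry nG, turns
 * the MMU back on and finally stores zero to release the secondaries.
 */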
SYM_FUNC_START(idmap_kpti_install_ng_mappings)
	cpu		.req	w0
	num_cpus	.req	w1
	swapper_pa	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	pud		.req	x10
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	pmd		.req	x13
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16

	mrs	swapper_ttb, ttbr1_el1
	restore_ttbr1	swapper_ttb
	adr	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* We need to walk swapper, so turn off the MMU. */
	pre_disable_mmu_workaround
	mrs	x17, sctlr_el1
	bic	x17, x17, #SCTLR_ELx_M
	msr	sctlr_el1, x17
	isb

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
	/* PGD */
	mov	cur_pgdp, swapper_pa
	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
	tbnz	pgd, #1, walk_puds
next_pgd:
	__idmap_kpti_put_pgtable_ent_ng	pgd
skip_pgd:
	add	cur_pgdp, cur_pgdp, #8
	cmp	cur_pgdp, end_pgdp
	b.ne	do_pgd

	/* Publish the updated tables and nuke all the TLBs */
	dsb	sy
	tlbi	vmalle1is
	dsb	ish
	isb

	/* We're done: fire up the MMU again */
	mrs	x17, sctlr_el1
	orr	x17, x17, #SCTLR_ELx_M
	set_sctlr_el1	x17

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

	/* PUD */
walk_puds:
	.if CONFIG_PGTABLE_LEVELS > 3
	pte_to_phys	cur_pudp, pgd
	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud:	__idmap_kpti_get_pgtable_ent	pud
	tbnz	pud, #1, walk_pmds
next_pud:
	__idmap_kpti_put_pgtable_ent_ng	pud
skip_pud:
	add	cur_pudp, cur_pudp, #8
	cmp	cur_pudp, end_pudp
	b.ne	do_pud
	b	next_pgd
	.else /* CONFIG_PGTABLE_LEVELS <= 3 */
	mov	pud, pgd
	b	walk_pmds
next_pud:
	b	next_pgd
	.endif
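
	/*
	 * When the PUD level is folded, the PGD entry above is simply reused
	 * as the PUD entry and next_pud falls through to next_pgd; the PMD
	 * block below mirrors this for folded PMDs.
	 */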

	/* PMD */
walk_pmds:
	.if CONFIG_PGTABLE_LEVELS > 2
	pte_to_phys	cur_pmdp, pud
	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
	tbnz	pmd, #1, walk_ptes
next_pmd:
	__idmap_kpti_put_pgtable_ent_ng	pmd
skip_pmd:
	add	cur_pmdp, cur_pmdp, #8
	cmp	cur_pmdp, end_pmdp
	b.ne	do_pmd
	b	next_pud
	.else /* CONFIG_PGTABLE_LEVELS <= 2 */
	mov	pmd, pud
	b	walk_ptes
next_pmd:
	b	next_pud
	.endif

	/* PTE */
walk_ptes:
	pte_to_phys	cur_ptep, pmd
	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte:	__idmap_kpti_get_pgtable_ent	pte
	__idmap_kpti_put_pgtable_ent_ng	pte
skip_pte:
	add	cur_ptep, cur_ptep, #8
	cmp	cur_ptep, end_ptep
	b.ne	do_pte
	b	next_pmd

	.unreq	cpu
	.unreq	num_cpus
	.unreq	swapper_pa
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1	x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	offset_ttbr1 swapper_ttb, x16
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif

/*
 * __cpu_setup
 *
 * Initialise the processor for turning the MMU on.
 *
 * Output:
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Memory region attributes
	 */
	mov_q	x5, MAIR_EL1_SET
#ifdef CONFIG_ARM64_MTE
	mte_tcr	.req	x20

	mov	mte_tcr, #0

	/*
	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
	 * (ID_AA64PFR1_EL1[11:8] > 1).
	 */
	mrs	x10, ID_AA64PFR1_EL1
	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
	cmp	x10, #ID_AA64PFR1_MTE
	b.lt	1f

	/* Normal Tagged memory type at the corresponding MAIR index */
	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8

	mov	x10, #KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, x10

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	mrs	x10, CNTVCT_EL0
	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
	csinc	x10, x10, xzr, ne
	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
	msr_s	SYS_RGSR_EL1, x10

	/* clear any pending tag check faults in TFSR*_EL1 */
	msr_s	SYS_TFSR_EL1, xzr
	msr_s	SYS_TFSRE0_EL1, xzr

	/* set the TCR_EL1 bits */
	mov_q	mte_tcr, TCR_MTE_FLAGS
1:
#endif
	msr	mair_el1, x5
	/*
	 * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
	 * adjusted if the kernel is compiled with 52bit VA support.
	 */
	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
#ifdef CONFIG_ARM64_MTE
	orr	x10, x10, mte_tcr
	.unreq	mte_tcr
#endif
	tcr_clear_errata_bits x10, x9, x5

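	/*
	 * Note: with 52-bit VA support x9 is computed as 64 - vabits_actual,
	 * i.e. the TxSZ encoding for the VA size actually in use, and is
	 * programmed into both T1SZ and T0SZ; otherwise T1SZ keeps the
	 * compile-time VA_BITS value set above and T0SZ comes from the
	 * boot-time idmap_t0sz.
	 */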
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l		x9, vabits_actual
	sub		x9, xzr, x9
	add		x9, x9, #64
	tcr_set_t1sz	x10, x9
#else
	ldr_l		x9, idmap_t0sz
#endif
	tcr_set_t0sz	x10, x9

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, #0xf
	cbz	x9, 1f
	orr	x10, x10, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */
	msr	tcr_el1, x10
	/*
	 * Prepare SCTLR
	 */
	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S
SYM_FUNC_END(__cpu_setup)