Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

File: arch/arm64/kernel/head.S (git blame: every line below is from commit 8f3ce5b39, kx, 2023-10-28)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	KERNEL_START

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address of the FDT blob.
 *
 * This code is mostly position independent, so you can call it at
 * __pa(PAGE_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
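	/*
	 * "add x13, x18, #0x16" assembles to 0x91005a4d; stored
	 * little-endian, its first two bytes are 0x4d 0x5a, i.e. "MZ".
	 */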
	b	primary_entry
#else
	b	primary_entry			// branch to kernel start, magic
	.long	0				// reserved
#endif
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
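	/*
	 * ARM64_IMAGE_MAGIC is the four ASCII bytes "ARM\x64"
	 * (arch/arm64/include/asm/image.h).
	 */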
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

	/*
	 * The following callee-saved general purpose registers are used on the
	 * primary low-level boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
	 *  x28        __create_page_tables()                   callee preserved temp register
	 *  x19/x20    __primary_switch()                       callee preserved temp registers
	 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
	 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	init_kernel_el			// w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
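	/*
	 * MIN_KIMG_ALIGN is 2 MiB, so x23 now holds the load address
	 * modulo 2 MiB: any physical misalignment of the image, which
	 * doubles as the initial KASLR displacement.
	 */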
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
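/*
 * Worked example (a sketch assuming 4 KiB pages and 48-bit VAs, so
 * PGDIR_SHIFT == 39 and ptrs == 512): for virt == 0xffff800010000000,
 * (virt >> 39) & 511 == 256, so the next-level table pointer is
 * written at tbl + 256 * 8.
 */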

/*
 * Macro to populate page table entries. These entries can be pointers to
 * the next level, or last-level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend]
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *			  our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += count * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
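/*
 * Example (a sketch): with shift == PUD_SHIFT and count == 0 on entry,
 * a range whose start and end fall in PUD slots 5 and 6 yields
 * istart == 5, iend == 6 and count == 1 on exit, so the next level is
 * treated as two consecutive table pages and its iend is scaled up by
 * count * ptrs.
 */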

/*
 * Map memory for the specified virtual address range. Each level of page
 * table needed supports multiple entries. If a level requires n entries the
 * next page table level is assumed to be formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend - 1]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, flags
 * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	sub \vend, \vend, #1
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
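/*
 * Usage note: both call sites below pass a pgdir in tbl, the VA range in
 * vstart/vend, SWAPPER_MM_MMUFLAGS in flags, the physical base in phys
 * and five scratch registers; on return the tables rooted at tbl map
 * [vstart, vend - 1] using SWAPPER_BLOCK_SIZE block entries.
 */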

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	mov	x28, lr

	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b
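	/*
	 * The loop above clears 64 bytes per iteration; init_pg_dir and
	 * init_pg_end are page aligned, so the region size is always a
	 * whole multiple of 64.
	 */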

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_actual
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line
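	/*
	 * This str/dmb/"dc ivac" pattern recurs below: with the MMU off
	 * the store goes straight to memory, so any stale cache line
	 * covering the variable must be invalidated for later cached
	 * (MMU-on) readers to see the new value.
	 */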

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
	b.ge	1f			// .. then skip VA range extension
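	/*
	 * Example: if __idmap_text_end lies just below 2^34, clz gives
	 * x5 == 30, i.e. T0SZ == 30 (a 34-bit ID map). With
	 * VA_BITS_MIN == 48 the default value compared against is 16, so
	 * the extension path below only runs when the ID map would have
	 * to reach above 2^48.
	 */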

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	dmb	sy

	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	ret	x28
SYM_FUNC_END(__create_page_tables)

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

#ifdef CONFIG_SHADOW_CALL_STACK
	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
#endif

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x21				// pass FDT address in x0
	bl	early_fdt_map			// Try mapping the FDT early
	bl	init_feature_override		// Parse cpu feature overrides
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
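	/*
	 * When a nonzero KASLR seed is found, the early return above goes
	 * back to __primary_switch (not shown in this excerpt), which
	 * rebuilds the page tables, re-relocates the kernel by the offset
	 * now in x23, and then calls __primary_switched again.
	 */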
	bl	switch_to_vhe			// Prefer VHE if possible
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
SYM_FUNC_END(__primary_switched)

	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
	.quad		_text
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
	.popsection

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(init_kernel_el)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr	sctlr_el1, x0

	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2
	eret
SYM_FUNC_END(init_kernel_el)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// record this CPU's boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
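/*
 * A CPU that entered at EL2 writes w0 to the second word of
 * __boot_cpu_mode (below); one that entered at EL1 overwrites the first
 * word, which statically holds BOOT_CPU_MODE_EL2. Comparing the two
 * words later tells the kernel both whether EL2 was available and
 * whether all CPUs booted at the same exception level.
 */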

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
	.quad 	0
SYM_DATA_END(__early_cpu_boot_status)

	.popsection

	/*
	 * This provides a "holding pen" in which platforms hold all
	 * secondary cores until we're ready for them to initialise: each
	 * core spins in wfe until secondary_holding_pen_release matches
	 * its own MPIDR hardware ID.
	 */
SYM_FUNC_START(secondary_holding_pen)
	bl	init_kernel_el			// w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	bl	init_kernel_el			// w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	switch_to_vhe
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
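	/*
	 * The literal pool entry below holds the link-time virtual
	 * address of __secondary_switched, so the indirect branch is what
	 * moves execution out of the identity map into the kernel
	 * mapping.
	 */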
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow
	msr	sp_el0, x2
	scs_load x2, x3
	mov	x29, #0
	mov	x30, #0

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	offset_ttbr1 x1, x3
	msr	ttbr1_el1, x1			// load TTBR1
	isb

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

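	/*
	 * Each Elf64_Rela entry is 24 bytes: r_offset, r_info, r_addend.
	 * The loop below loads them into x12, x13 and x14 respectively
	 * and, for R_AARCH64_RELATIVE entries, stores r_addend + x23 at
	 * r_offset + x23.
	 */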
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 0:	cmp	x9, x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	b.hs	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	ldp	x12, x13, [x9], #24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	ldr	x14, [x9, #-8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	cmp	w13, #R_AARCH64_RELATIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	b.ne	0b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	add	x14, x14, x23			// relocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	str	x14, [x12, x23]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	b	0b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) #ifdef CONFIG_RELR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	 * Apply RELR relocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	 * RELR is a compressed format for storing relative relocations. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	 * encoded sequence of entries looks like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	 * i.e. start with an address, followed by any number of bitmaps. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	 * address entry encodes 1 relocation. The subsequent bitmap entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	 * encode up to 63 relocations each, at subsequent offsets following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	 * the last address entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	 * The bitmap entries must have 1 in the least significant bit. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	 * assumption here is that an address cannot have 1 in lsb. Odd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	 * addresses are not supported. Any odd addresses are stored in the RELA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	 * section, which is handled above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	 * Excluding the least significant bit in the bitmap, each non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	 * bit in the bitmap represents a relocation to be applied to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	 * a corresponding machine word that follows the base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	 * word. The second least significant bit represents the machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	 * word immediately following the initial address, and each bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	 * that follows represents the next word, in linear order. As such,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	 * In this implementation we store the address of the next RELR table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	 * entry in x9, the address being relocated by the current address or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	 * bitmap entry in x13 and the address being relocated by the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	 * bit in x14.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	 * Because addends are stored in place in the binary, RELR relocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	 * cannot be applied idempotently. We use x24 to keep track of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	 * currently applied displacement so that we can correctly relocate if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	 * __relocate_kernel is called twice with non-zero displacements (i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	 * if there is both a physical misalignment and a KASLR displacement).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	 */
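As a companion to the comment above, here is a hedged C decoder for the same stream; apply_relr, relr_end and delta are made-up names. For simplicity it assumes a single fresh pass, so one delta stands in for both x23 (locating the target) and x15 (the correction applied); the assembly below additionally handles re-application via x24.

#include <stdint.h>

static void apply_relr(const uint64_t *relr, const uint64_t *relr_end,
		       uint64_t delta)
{
	uint64_t *where = 0;	/* set by the first address entry */

	for (; relr < relr_end; relr++) {
		uint64_t entry = *relr;

		if (!(entry & 1)) {		/* address entry: lsb clear */
			where = (uint64_t *)(entry + delta);
			*where++ += delta;	/* one relocation, then step
						 * past the address word */
		} else {			/* bitmap entry: lsb set */
			for (int n = 0; (entry >>= 1) != 0; n++)
				if (entry & 1)	/* bit k covers word k - 1 */
					where[n] += delta;
			where += 63;		/* next bitmap covers the
						 * following 63 words */
		}
	}
}

For instance, relocations at base, base + 8 and base + 24 can be encoded in two words: the address entry base (covering base itself) followed by the bitmap 0b1011, whose bit 0 is the marker and whose set bits 1 and 3 cover the first and third words after the address entry.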
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	ldr	w9, =__relr_offset		// offset to reloc table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	ldr	w10, =__relr_size		// size of reloc table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	add	x9, x9, x11			// __va(.relr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	sub	x15, x23, x24			// delta from previous offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	cbz	x15, 7f				// nothing to do if unchanged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	mov	x24, x23			// save new offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 
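In C terms, the three instructions above amount to the helper below; relr_pass_delta, total_offset and applied_offset are illustrative stand-ins for x23 and x24.

#include <stdint.h>

/*
 * Returns the correction to apply on this pass; 0 means the stored addends
 * already reflect total_offset ("cbz x15, 7f"). The assembly updates x24
 * only when the delta is non-zero, which is equivalent.
 */
static uint64_t relr_pass_delta(uint64_t total_offset, uint64_t *applied_offset)
{
	uint64_t delta = total_offset - *applied_offset; /* sub x15, x23, x24 */

	*applied_offset = total_offset;			 /* mov x24, x23 */
	return delta;
}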
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 2:	cmp	x9, x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	b.hs	7f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	ldr	x11, [x9], #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	tbnz	x11, #0, 3f			// branch to handle bitmaps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	add	x13, x11, x23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	ldr	x12, [x13]			// relocate address entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	add	x12, x12, x15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	str	x12, [x13], #8			// adjust to start of bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	b	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 3:	mov	x14, x13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 4:	lsr	x11, x11, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	cbz	x11, 6f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	tbz	x11, #0, 5f			// skip bit if not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	ldr	x12, [x14]			// relocate bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	add	x12, x12, x15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	str	x12, [x14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 5:	add	x14, x14, #8			// move to next bit's address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	b	4b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 6:	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	 * Move to the next bitmap's address. 8 is the word size in bytes, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	 * 63 is the number of significant bits in a bitmap entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	add	x13, x13, #(8 * 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	b	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) SYM_FUNC_END(__relocate_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) SYM_FUNC_START_LOCAL(__primary_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #ifdef CONFIG_RANDOMIZE_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	mov	x19, x0				// preserve new SCTLR_EL1 value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	adrp	x1, init_pg_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	bl	__enable_mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #ifdef CONFIG_RELOCATABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) #ifdef CONFIG_RELR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	mov	x24, #0				// no RELR displacement yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	bl	__relocate_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) #ifdef CONFIG_RANDOMIZE_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	ldr	x8, =__primary_switched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	adrp	x0, __PHYS_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	blr	x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	 * If we return here, we have a KASLR displacement in x23 which we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	 * to take into account by discarding the current kernel mapping and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	 * creating a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	pre_disable_mmu_workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	msr	sctlr_el1, x20			// disable the MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	bl	__create_page_tables		// recreate kernel mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	tlbi	vmalle1				// Remove any stale TLB entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	dsb	nsh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	set_sctlr_el1	x19			// re-enable the MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	bl	__relocate_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	ldr	x8, =__primary_switched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	adrp	x0, __PHYS_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	br	x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) SYM_FUNC_END(__primary_switch)
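To summarise the CONFIG_RANDOMIZE_BASE path of __primary_switch, here is a hedged C outline. Every identifier is a stand-in for an assembly label or register, declared extern only so the sketch compiles; none of them are real kernel symbols.

#include <stdint.h>

extern uint64_t read_sctlr_el1(void);		/* mrs x20, sctlr_el1 */
extern void write_sctlr_el1(uint64_t v);	/* msr sctlr_el1 / set_sctlr_el1 */
extern void enable_mmu(uint64_t sctlr, uint64_t pgdir);
extern void relocate_kernel(void);
extern void create_page_tables(void);
extern void local_flush_tlb(void);		/* tlbi vmalle1; dsb nsh; isb */
extern void primary_switched(uint64_t phys_offset);

static void primary_switch(uint64_t new_sctlr, uint64_t init_pg_dir,
			   uint64_t phys_offset)
{
	uint64_t old_sctlr = read_sctlr_el1();

	enable_mmu(new_sctlr, init_pg_dir);
	relocate_kernel();		/* pass 1: image running as linked */
	primary_switched(phys_offset);	/* returns only with a KASLR offset */

	write_sctlr_el1(old_sctlr);	/* MMU off for the remap */
	create_page_tables();		/* recreate the kernel mapping */
	local_flush_tlb();		/* drop stale translations */
	write_sctlr_el1(new_sctlr);	/* MMU on again */
	relocate_kernel();		/* pass 2: apply the displacement */
	primary_switched(phys_offset);	/* final entry; does not return */
}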