Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/v7m.h>

#include "efi-header.S"

 AR_CLASS(	.arch	armv7-a	)
 M_CLASS(	.arch	armv7-m	)

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp1, tmp2
		.endm
		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif
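		/*
		 * All three writeb variants above push one character out
		 * through the CP14 debug communications channel (DCC)
		 * rather than a memory-mapped UART, which is why loadsp
		 * is empty here; only the coprocessor register encoding
		 * of the DCC transmit register differs between the v6/v7,
		 * XScale and older core families.
		 */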

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb,	ch, rb, tmp
#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
		waituartcts \tmp, \rb
#endif
		waituarttxrdy \tmp, \rb
		senduart \ch, \rb
		busyuart \tmp, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp1, tmp2
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp1, tmp2
		addruart \rb, \tmp1, \tmp2
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		/*
		 * Debug kernel copy by printing the memory addresses involved
		 */
		.macro dbgkc, begin, end, cbegin, cend
#ifdef DEBUG
		kputc   #'C'
		kputc   #':'
		kputc   #'0'
		kputc   #'x'
		kphex   \begin, 8	/* Start of compressed kernel */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\end, 8		/* End of compressed kernel */
		kputc	#'-'
		kputc	#'>'
		kputc   #'0'
		kputc   #'x'
		kphex   \cbegin, 8	/* Start of kernel copy */
		kputc	#'-'
		kputc	#'0'
		kputc	#'x'
		kphex	\cend, 8	/* End of kernel copy */
		kputc	#'\n'
#endif
		.endm

		/*
		 * Debug print of the final appended DTB location
		 */
		.macro dbgadtb, begin, size
#ifdef DEBUG
		kputc   #'D'
		kputc   #'T'
		kputc   #'B'
		kputc   #':'
		kputc   #'0'
		kputc   #'x'
		kphex   \begin, 8	/* Start of appended DTB */
		kputc	#' '
		kputc	#'('
		kputc	#'0'
		kputc	#'x'
		kphex	\size, 8	/* Size of appended DTB */
		kputc	#')'
		kputc	#'\n'
#endif
		.endm

		.macro	enable_cp15_barriers, reg
		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
		bne	.L_\@
		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
 ARM(		.inst   0xf57ff06f		@ v7+ isb	)
 THUMB(		isb						)
.L_\@:
		.endm

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		.macro	get_inflated_image_size, res:req, tmp1:req, tmp2:req
		adr	\res, .Linflated_image_size_offset
		ldr	\tmp1, [\res]
		add	\tmp1, \tmp1, \res	@ address of inflated image size

		ldrb	\res, [\tmp1]		@ get_unaligned_le32
		ldrb	\tmp2, [\tmp1, #1]
		orr	\res, \res, \tmp2, lsl #8
		ldrb	\tmp2, [\tmp1, #2]
		ldrb	\tmp1, [\tmp1, #3]
		orr	\res, \res, \tmp2, lsl #16
		orr	\res, \res, \tmp1, lsl #24
		.endm
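		/*
		 * The byte-wise load sequence above is the assembly
		 * equivalent of the kernel's get_unaligned_le32(): the
		 * appended size word may sit at any byte offset, so it
		 * is assembled from four single-byte loads, roughly:
		 *
		 *	u32 get_unaligned_le32(const u8 *p)
		 *	{
		 *		return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
		 *	}
		 */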

		.macro	be32tocpu, val, tmp
#ifndef __ARMEB__
		/* convert to little endian */
		rev_l	\val, \tmp
#endif
		.endm
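		/*
		 * rev_l byte-swaps \val in place (using \tmp on cores
		 * without the rev instruction), so a big-endian FDT
		 * field such as the magic 0xd00dfeed, which a
		 * little-endian CPU loads as 0xedfe0dd0, comes out as
		 * 0xd00dfeed again.
		 */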

		.section ".start", "ax"
/*
 * sort out different calling conventions
 */
		.align
		/*
		 * Always enter in ARM state for CPUs that support the ARM ISA.
		 * As of today (2014) that's exactly the members of the A and R
		 * classes.
		 */
 AR_CLASS(	.arm	)
start:
		.type	start,#function
		/*
		 * These 7 nops along with the 1 nop immediately below for
		 * !THUMB2 form 8 nops that make the compressed kernel bootable
		 * on legacy ARM systems that were assuming the kernel in a.out
		 * binary format. The boot loaders on these systems would
		 * jump 32 bytes into the image to skip the a.out header.
		 * With these 8 nops filling exactly 32 bytes, things still
		 * work as expected on these legacy systems. Thumb2 mode keeps
		 * 7 of the nops as it turns out that some boot loaders
		 * were patching the initial instructions of the kernel, i.e.
		 * had started to exploit this "patch area".
		 */
		__initial_nops
		.rept	5
		__nop
		.endr
#ifndef CONFIG_THUMB2_KERNEL
		__nop
#else
 AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
  M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
		.thumb
#endif
		W(b)	1f

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag
		.word	0x45454545	@ another magic number to indicate
		.word	_magic_table	@ additional data table

		__EFI_HEADER
1:
 ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
 AR_CLASS(	mrs	r9, cpsr	)
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef CONFIG_CPU_V7M
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#endif
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		/*
		 * Find the start of physical memory.  As we are executing
		 * without the MMU on, we are in the physical address space.
		 * We just need to get rid of any offset by aligning the
		 * address.
		 *
		 * This alignment is a balance between the requirements of
		 * different platforms - we have chosen 128MB to allow
		 * platforms which align the start of their physical memory
		 * to 128MB to use this feature, while allowing the zImage
		 * to be placed within the first 128MB of memory on other
		 * platforms.  Increasing the alignment means we place
		 * stricter alignment requirements on the start of physical
		 * memory, but relaxing it means that we break people who
		 * are already placing their zImage in (eg) the top 64MB
		 * of this range.
		 */
		mov	r4, pc
		and	r4, r4, #0xf8000000
		/* Determine final kernel image address. */
		add	r4, r4, #(TEXT_OFFSET & 0xffff0000)
		add	r4, r4, #(TEXT_OFFSET & 0x0000ffff)
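		/*
		 * TEXT_OFFSET is added in two halves because an ARM
		 * data-processing immediate is an 8-bit value rotated
		 * right by an even amount: an offset such as 0x00408000
		 * (a hypothetical value for illustration) cannot be
		 * encoded in one instruction, whereas its masked halves
		 * 0x00400000 and 0x00008000 each can.
		 */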
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourselves.
		 * That means r4 < pc || r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, .Lheadroom
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on

restart:	adr	r0, LC1
		ldr	sp, [r0]
		ldr	r6, [r0, #4]
		add	sp, sp, r0
		add	r6, r6, r0

		get_inflated_image_size	r9, r10, lr

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	r10, sp, #MALLOC_SIZE
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *   sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here. No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1
		dbgadtb	r6, r5
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
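		/*
		 * Worked example: a 40000-byte DTB grows to
		 * 40000 + 20000 = 60000 bytes, is already a multiple
		 * of 8 so the alignment round-up is a no-op, and falls
		 * inside the [32768, 1048576] clamp, so 60000 bytes of
		 * work space are reserved for the merged FDT.
		 */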
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #(TEXT_OFFSET & 0xffff0000)
		sub	r0, r0, #(TEXT_OFFSET & 0x0000ffff)
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		mov	r2, r5
		bleq	atags_to_fdt

		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the current DTB size */
		ldr	r5, [r6, #4]
		be32tocpu r5, r1

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255
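		/*
		 * Illustrative numbers: with the decompressed image
		 * ending at 0x40801234 and 0x600 bytes of relocation
		 * code, r10 becomes 0x40801234 + 0x700 = 0x40801934,
		 * then aligns down to 0x40801900 - a 256-byte boundary
		 * at least one full copy of the relocation code ahead.
		 */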

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		/*
		 * Compute the address of the hyp vectors after relocation.
		 * This requires some arithmetic since we cannot directly
		 * reference __hyp_stub_vectors in a PC-relative way.
		 * Call __hyp_set_vectors with the new address so that we
		 * can HVC again after the copy.
		 */
0:		adr	r0, 0b
		movw	r1, #:lower16:__hyp_stub_vectors - 0b
		movt	r1, #:upper16:__hyp_stub_vectors - 0b
		add	r0, r0, r1
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

#ifdef DEBUG
		sub     r10, r6, r5
		sub     r10, r9, r10
		/*
		 * We are about to copy the kernel to a new memory area.
		 * The boundaries of the new memory area can be found in
		 * r10 and r9, whilst r5 and r6 contain the boundaries
		 * of the memory we are going to copy.
		 * Calling dbgkc will help with the printing of this
		 * information.
		 */
		dbgkc	r5, r6, r10, r9
#endif

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
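		/*
		 * Each ldmdb/stmdb pair moves eight registers, i.e. one
		 * 32-byte block per iteration (which is why the copy
		 * size was rounded up to a multiple of 32 above), and
		 * both pointers walk downwards, so a destination that
		 * overlaps above the source is never read after it has
		 * been written.
		 */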

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

		mov	r0, r9			@ start of relocated zImage
		add	r1, sp, r6		@ end of relocated zImage
		bl	cache_clean_flush

		badr	r0, restart
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r11, r12}
		sub	r0, r0, r1		@ calculate the delta offset

/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (CONFIG_ZBOOT_ROM=n),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b
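		/*
		 * The loop clears 16 bytes per iteration and only tests
		 * the end pointer afterwards, so it may store up to
		 * 12 bytes past the BSS end; the decompressor's stack
		 * section follows in the link map and is not live yet,
		 * so the overshoot is harmless.
		 */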

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

/*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #MALLOC_SIZE	@ 64k max
		mov	r3, r7
		bl	decompress_kernel

		get_inflated_image_size	r1, r2, r3

		mov	r0, r4			@ start of inflated image
		add	r1, r1, r0		@ end of inflated image
		bl	cache_clean_flush
		bl	cache_off

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.size	LC0, . - LC0

		.type	LC1, #object
LC1:		.word	.L_user_stack_end - LC1	@ sp
		.word	_edata - LC1		@ r6
		.size	LC1, . - LC1

.Lheadroom:
		.word	_end - restart + 16384 + 1024*1024

.Linflated_image_size_offset:
		.long	(input_data_end - 4) - .

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register
 * on ARMv7.
 */
		.macro	dcache_line_size, reg, tmp
#ifdef CONFIG_CPU_V7M
		movw	\tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
		movt	\tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
		ldr	\tmp, [\tmp]
#else
		mrc	p15, 0, \tmp, c0, c0, 1		@ read ctr
#endif
		lsr	\tmp, \tmp, #16
		and	\tmp, \tmp, #0xf		@ cache line size encoding
		mov	\reg, #4			@ bytes per word
		mov	\reg, \reg, lsl \tmp		@ actual cache line size
		.endm
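		/*
		 * CTR[19:16] (DminLine) encodes the smallest D-cache
		 * line as log2 of its size in words; e.g. a field value
		 * of 4 gives 4 << 4 = 64 bytes, the typical Cortex-A
		 * line size.
		 */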

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #20
		mov	r9, r9, lsl #20		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * If we are ever running from flash, we surely want the cache enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  * for our own execution as well.  We map 2 MB of it so there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * mapping overlap problem for a compressed kernel of up to 1 MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * If we are executing from RAM, this merely duplicates the mapping above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		orr	r1, r6, #0x04		@ ensure B is set for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		orr	r1, r1, #3 << 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		mov	r2, pc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		mov	r2, r2, lsr #20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		orr	r1, r1, r2, lsl #20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		add	r0, r3, r2, lsl #2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		str	r1, [r0], #4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		add	r1, r1, #1048576
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		str	r1, [r0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) ENDPROC(__setup_mmu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) @ Enable unaligned access on v6, to allow better code generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) @ for the decompressor C code:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) __armv6_mmu_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		bic	r0, r0, #2		@ A (no unaligned access fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		b	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) __arm926ejs_mmu_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		mov	r0, #4			@ put dcache in WT mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		mcr	p15, 7, r0, c15, c0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) __armv4_mmu_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		mov	r12, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		mov	r6, #CB_BITS | 0x12	@ U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		bl	__setup_mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		orr	r0, r0, #0x0030
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		bl	__common_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		mov	pc, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) __armv7_mmu_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		enable_cp15_barriers	r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		mov	r12, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		tst	r11, #0xf		@ VMSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		movne	r6, #CB_BITS | 0x02	@ !XN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		blne	__setup_mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		tst	r11, #0xf		@ VMSA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		orr	r0, r0, #0x003c		@ write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		bic	r0, r0, #2		@ A (no unaligned access fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 						@ (needed for ARM1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		orrne	r0, r0, #1		@ MMU enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		movne	r1, #0xfffffffd		@ domain 0 = client
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		bic     r6, r6, #1 << 31        @ 32-bit translation system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		bic     r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		mov	pc, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) __fa526_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		mov	r12, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		mov	r6, #CB_BITS | 0x12	@ U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		bl	__setup_mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		orr	r0, r0, #0x1000		@ I-cache enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		bl	__common_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		mov	pc, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) __common_mmu_cache_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) #ifndef CONFIG_THUMB2_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) #ifndef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		orr	r0, r0, #0x000d		@ Write buffer, mmu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		mov	r1, #-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		b	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		.align	5			@ cache line aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) #define PROC_ENTRY_SIZE (4*5)
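/*
 * Each proc_types entry below is exactly five words: CPU ID match,
 * CPU ID mask, then one 32-bit slot each for the 'on', 'off' and
 * 'flush' methods (a wide branch or a mov pc, lr stub), hence 4*5.
 */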
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * Here follow the relocatable cache support functions for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * various processors.  This is a generic hook for locating an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * entry and jumping to an instruction at the specified offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * from the start of the block.  Please note this is all position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * independent code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  *  r1  = corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  *  r2  = corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  *  r3  = block offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  *  r9  = corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  *  r12 = corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
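/*
 * The offsets follow from the entry layout: +0 is the ID match word,
 * +4 the mask, and +8/+12/+16 the 'on'/'off'/'flush' slots.  For
 * example, cache_off below enters with r3 = 12, so "addeq pc, r12, r3"
 * jumps straight to the 'cache off' method of the matching entry.
 */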
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) call_cache_fn:	adr	r12, proc_types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) #ifdef CONFIG_CPU_CP15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		mrc	p15, 0, r9, c0, c0	@ get processor ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) #elif defined(CONFIG_CPU_V7M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		 * On v7-M the processor id is located in the V7M_SCB_CPUID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * register, but as cache handling is IMPLEMENTATION DEFINED on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * v7-M (if existent at all) we just return early here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 * If V7M_SCB_CPUID were used, the CPU ID functions (i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		 * __armv7_mmu_cache_{on,off,flush}) would be selected, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		 * use cp15 registers that are not implemented on v7-M.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		bx	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		ldr	r9, =CONFIG_PROCESSOR_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 1:		ldr	r1, [r12, #0]		@ get value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		ldr	r2, [r12, #4]		@ get mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		eor	r1, r1, r9		@ (real ^ match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		tst	r1, r2			@       & mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956)  ARM(		addeq	pc, r12, r3		) @ call cache function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  THUMB(		addeq	r12, r3			)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  THUMB(		moveq	pc, r12			) @ call cache function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		add	r12, r12, #PROC_ENTRY_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		b	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * Table for cache operations.  This is basically:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  *   - CPU ID match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  *   - CPU ID mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  *   - 'cache on' method instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  *   - 'cache off' method instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *   - 'cache flush' method instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  * We match an entry using: ((real_id ^ match) & mask) == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * Writethrough caches generally only need 'on' and 'off'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  * methods.  Writeback caches _must_ have the flush method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * defined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  */
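/*
 * Worked example: a CPUID-scheme MIDR such as 0x410fd083 has
 * architecture bits [19:16] = 0xf, so every entry below fails the
 * test except the generic 0x000f0000/0x000f0000 one:
 * (0x410fd083 ^ 0x000f0000) & 0x000f0000 == 0.
 */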
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		.type	proc_types,#object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) proc_types:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		.word	0x41000000		@ old ARM ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		.word	0xff00f000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		.word	0x41007000		@ ARM7/710
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		.word	0xfff8fe00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		.word	0x41807200		@ ARM720T (writethrough)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		.word	0xffffff00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		.word	0x41007400		@ ARM74x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		.word	0xff00ff00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		W(b)	__armv3_mpu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		W(b)	__armv3_mpu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		W(b)	__armv3_mpu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		.word	0x41009400		@ ARM94x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		.word	0xff00ff00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		W(b)	__armv4_mpu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		W(b)	__armv4_mpu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		W(b)	__armv4_mpu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		.word	0xff0ffff0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		W(b)	__arm926ejs_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		W(b)	__armv5tej_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		.word	0x00007000		@ ARM7 IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		.word	0x0000f000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		@ Everything from here on will be the new ID system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		.word	0x4401a100		@ sa110 / sa1100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		.word	0xffffffe0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		W(b)	__armv4_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		.word	0x6901b110		@ sa1110
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		.word	0xfffffff0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		W(b)	__armv4_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		.word	0x56056900		@ PXA9xx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		.word	0xffffff00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		W(b)	__armv4_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		.word	0x56158000		@ PXA168
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		.word	0xfffff000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		W(b)	__armv5tej_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		.word	0x56050000		@ Feroceon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		.word	0xff0f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		W(b)	__armv5tej_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) #ifdef CONFIG_CPU_FEROCEON_OLD_ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		/* this conflicts with the standard ARMv5TE entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		.long	0x41009260		@ Old Feroceon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		.long	0xff00fff0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		b	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		b	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		b	__armv5tej_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		.word	0x66015261		@ FA526
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		.word	0xff01fff1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		W(b)	__fa526_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		W(b)	__fa526_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		@ These match on the architecture ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		.word	0x00020000		@ ARMv4T
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		.word	0x000f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		W(b)	__armv4_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		.word	0x00050000		@ ARMv5TE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		.word	0x000f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		W(b)	__armv4_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		.word	0x00060000		@ ARMv5TEJ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		.word	0x000f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		W(b)	__armv4_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		W(b)	__armv5tej_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		.word	0x0007b000		@ ARMv6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		.word	0x000ff000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		W(b)	__armv6_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		W(b)	__armv4_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		W(b)	__armv6_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		.word	0x000f0000		@ new CPU ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		.word	0x000f0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		W(b)	__armv7_mmu_cache_on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		W(b)	__armv7_mmu_cache_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		W(b)	__armv7_mmu_cache_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		.word	0			@ unrecognised type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		.word	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  THUMB(		nop				)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		.size	proc_types, . - proc_types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		 * If you get a "non-constant expression in ".if" statement"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		 * error from the assembler on this line, check that you have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 * not accidentally written a "b" instruction where you should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		 * have written W(b).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		.error "The size of one or more proc_types entries is wrong."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		.endif
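		/*
		 * (In Thumb-2, a plain "b" may assemble to a 16-bit
		 * encoding, which would break the fixed PROC_ENTRY_SIZE
		 * stride; W(b) forces the 32-bit wide form.)
		 */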
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * Turn off the Cache and MMU.  ARMv3 does not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  * reading the control register, but ARMv4 does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * On exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  *  r0, r1, r2, r3, r9, r12 corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  * This routine must preserve:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  *  r4, r7, r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) cache_off:	mov	r3, #12			@ cache_off function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		b	call_cache_fn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) __armv4_mpu_cache_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		mrc	p15, 0, r0, c1, c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		bic	r0, r0, #0x000d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) __armv3_mpu_cache_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		mrc	p15, 0, r0, c1, c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		bic	r0, r0, #0x000d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) __armv4_mmu_cache_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		mrc	p15, 0, r0, c1, c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		bic	r0, r0, #0x000d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) __armv7_mmu_cache_off:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		mrc	p15, 0, r0, c1, c0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		bic	r0, r0, #0x0005
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		bic	r0, r0, #0x0004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		mov	r0, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		mcr	p15, 0, r0, c7, c10, 4	@ DSB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * Clean and flush the cache to maintain consistency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * On entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  *  r0 = start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  *  r1 = end address (exclusive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * On exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  *  r1, r2, r3, r9, r10, r11, r12 corrupted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * This routine must preserve:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  *  r4, r6, r7, r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  */
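/*
 * r3 = 16 selects the 'flush' slot of the matched proc_types entry;
 * the exclusive end address is parked in r11 because r1 is one of the
 * registers the flush routines below are allowed to corrupt.
 */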
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		.align	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) cache_clean_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		mov	r3, #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		mov	r11, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		b	call_cache_fn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) __armv4_mpu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		movne	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		mov	r2, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		mov	r3, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		mov	r1, #7 << 5		@ 8 segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 1:		orr	r3, r1, #63 << 26	@ 64 entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		subs	r3, r3, #1 << 26
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		bcs	2b			@ entries 63 to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		subs 	r1, r1, #1 << 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		bcs	1b			@ segments 7 to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		teq	r2, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) __fa526_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		movne	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		mov	r1, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) __armv6_mmu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		mov	r1, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) __armv7_mmu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		enable_cp15_barriers	r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		bne	iflush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		mov	r10, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		beq	hierarchical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		b	iflush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) hierarchical:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		dcache_line_size r1, r2		@ r1 := dcache min line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		sub	r2, r1, #1		@ r2 := line size mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		bic	r0, r0, r2		@ round down start to line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		sub	r11, r11, #1		@ end address is exclusive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		bic	r11, r11, r2		@ round down end to line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 0:		cmp	r0, r11			@ finished?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		bgt	iflush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		mcr	p15, 0, r0, c7, c14, 1	@ Dcache clean/invalidate by VA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		add	r0, r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		b	0b
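		/*
		 * Example with a 64-byte line: r2 = 63, both bics round
		 * down to a line boundary, and since the end address is
		 * exclusive the last line cleaned is the one containing
		 * (r11 - 1); the loop stops once r0 passes that line.
		 */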
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) iflush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		mcr	p15, 0, r10, c7, c10, 4	@ DSB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		mcr	p15, 0, r10, c7, c10, 4	@ DSB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		mcr	p15, 0, r10, c7, c5, 4	@ ISB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) __armv5tej_mmu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		movne	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 1:		mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		bne	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) __armv4_mmu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		movne	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		mov	r2, #64*1024		@ default: 32K dcache size (*2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		mov	r11, #32		@ default: 32 byte line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		teq	r3, r9			@ cache ID register present?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		beq	no_cache_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		mov	r1, r3, lsr #18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		and	r1, r1, #7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		mov	r2, #1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		mov	r2, r2, lsl r1		@ base dcache size *2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		tst	r3, #1 << 14		@ test M bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		mov	r3, r3, lsr #12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		and	r3, r3, #3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		mov	r11, #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		mov	r11, r11, lsl r3	@ cache line size in bytes
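		/*
		 * Example: a size field of 6 gives r2 = 1024 << 6 = 64 KB,
		 * twice a 32 KB D-cache (the loop below reads twice the
		 * cache size to force eviction), and a len field of 2
		 * gives r11 = 8 << 2 = 32 byte lines, matching the
		 * defaults set above.
		 */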
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) no_cache_id:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		mov	r1, pc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		bic	r1, r1, #63		@ align to longest cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		add	r2, r1, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  THUMB(		ldr     r3, [r1]		) @ s/w flush D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  THUMB(		add     r1, r1, r11		)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		teq	r1, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		bne	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) __armv3_mmu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) __armv3_mpu_cache_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		tst	r4, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		movne	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		mov	r1, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * Various debugging routines for printing hex characters and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * memory, which again must be relocatable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		.type	phexbuf,#object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) phexbuf:	.space	12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		.size	phexbuf, . - phexbuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) @ phex corrupts {r0, r1, r2, r3}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) phex:		adr	r3, phexbuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		mov	r2, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		strb	r2, [r3, r1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 1:		subs	r1, r1, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		movmi	r0, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		bmi	puts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		and	r2, r0, #15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		mov	r0, r0, lsr #4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		cmp	r2, #10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		addge	r2, r2, #7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		add	r2, r2, #'0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		strb	r2, [r3, r1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		b	1b
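@ e.g. r0 = 0xc0008000, r1 = 8 builds the NUL-terminated string
@ "C0008000" in phexbuf (digits generated low nibble first) and
@ tail-calls puts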
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) @ puts corrupts {r0, r1, r2, r3}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) puts:		loadsp	r3, r2, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 1:		ldrb	r2, [r0], #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		teq	r2, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		moveq	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 2:		writeb	r2, r3, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		mov	r1, #0x00020000		@ crude fixed delay between chars
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 3:		subs	r1, r1, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		bne	3b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		teq	r2, #'\n'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		moveq	r2, #'\r'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		beq	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		teq	r0, #0			@ r0 == 0 when entered via putc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		bne	1b			@ otherwise keep walking the string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		mov	pc, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) @ putc corrupts {r0, r1, r2, r3}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) putc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		mov	r2, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		loadsp	r3, r1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		mov	r0, #0			@ so the shared tail in puts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		b	2b			@ returns after this one char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
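@ prints 64 words starting at r0, 8 per line, in the form
@ "<addr>: w0 w1 w2 w3  w4 w5 w6 w7" (extra space mid-row)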
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) memdump:	mov	r12, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		mov	r10, lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		mov	r11, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 2:		mov	r0, r11, lsl #2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		add	r0, r0, r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		mov	r1, #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		bl	phex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		mov	r0, #':'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		bl	putc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 1:		mov	r0, #' '
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		bl	putc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		ldr	r0, [r12, r11, lsl #2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		mov	r1, #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		bl	phex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		and	r0, r11, #7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		teq	r0, #3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		moveq	r0, #' '
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		bleq	putc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		and	r0, r11, #7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		add	r11, r11, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		teq	r0, #7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		bne	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		mov	r0, #'\n'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		bl	putc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		cmp	r11, #64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		blt	2b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		mov	pc, r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		.ltorg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) #ifdef CONFIG_ARM_VIRT_EXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .align 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) __hyp_reentry_vectors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		W(b)	.			@ reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		W(b)	.			@ undef
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) #ifdef CONFIG_EFI_STUB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		W(b)	__enter_kernel_from_hyp	@ hvc from HYP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		W(b)	.			@ svc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		W(b)	.			@ pabort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		W(b)	.			@ dabort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		W(b)	__enter_kernel		@ hyp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		W(b)	.			@ irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		W(b)	.			@ fiq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) #endif /* CONFIG_ARM_VIRT_EXT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
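/*
 * Hand-off uses the ARM Linux boot protocol register convention:
 * r0 = 0, r1 = machine type number (from r7) and r2 = physical
 * address of the atags list or DTB (from r8).
 */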
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) __enter_kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		mov	r0, #0			@ must be 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		mov	r1, r7			@ restore architecture number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		mov	r2, r8			@ restore atags pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  ARM(		mov	pc, r4		)	@ call kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  THUMB(		bx	r4		)	@ entry point is always ARM for A/R classes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) reloc_code_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) #ifdef CONFIG_EFI_STUB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) __enter_kernel_from_hyp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		mrc	p15, 4, r0, c1, c0, 0	@ read HSCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		bic	r0, r0, #0x5		@ disable MMU and caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		mcr	p15, 4, r0, c1, c0, 0	@ write HSCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		b	__enter_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) ENTRY(efi_enter_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		mov	r4, r0			@ preserve image base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		mov	r8, r1			@ preserve DT pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		adr_l	r0, call_cache_fn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		adr	r1, 0f			@ clean the region of code we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		bl	cache_clean_flush	@ may run with the MMU off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) #ifdef CONFIG_ARM_VIRT_EXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		@ The EFI spec does not support booting on ARM in HYP mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		@ since it mandates that the MMU and caches are on, with all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		@ 32-bit addressable DRAM mapped 1:1 using short descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		@ While the EDK2 reference implementation adheres to this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		@ U-Boot might decide to enter the EFI stub in HYP mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		@ anyway, with the MMU and caches either on or off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		mrs	r0, cpsr		@ get the current mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		msr	spsr_cxsf, r0		@ record boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		and	r0, r0, #MODE_MASK	@ are we running in HYP mode?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		cmp	r0, #HYP_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		bne	.Lefi_svc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		mrc	p15, 4, r1, c1, c0, 0	@ read HSCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		tst	r1, #0x1		@ MMU enabled at HYP?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		@ When running in HYP mode with the caches on, we're better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		@ off just carrying on using the cached 1:1 mapping that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		@ firmware provided. Set up the HYP vectors so HVC instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		@ issued from HYP mode take us to the correct handler code. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		@ will disable the MMU before jumping to the kernel proper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  ARM(		bic	r1, r1, #(1 << 30)	) @ clear HSCTLR.TE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  THUMB(		orr	r1, r1, #(1 << 30)	) @ set HSCTLR.TE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		mcr	p15, 4, r1, c1, c0, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		adr	r0, __hyp_reentry_vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		mcr	p15, 4, r0, c12, c0, 0	@ set HYP vector base (HVBAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		b	.Lefi_hyp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		@ When running in HYP mode with the caches off, we need to drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		@ into SVC mode now, and let the decompressor set up its cached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		@ 1:1 mapping as usual.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 1:		mov	r9, r4			@ preserve image base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		bl	__hyp_stub_install	@ install HYP stub vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		safe_svcmode_maskall	r1	@ drop to SVC mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		msr	spsr_cxsf, r0		@ record boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		orr	r4, r9, #1		@ restore image base and set LSB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		b	.Lefi_hyp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .Lefi_svc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		tst	r0, #0x1		@ MMU enabled?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		orreq	r4, r4, #1		@ set LSB if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .Lefi_hyp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		mov	r0, r8			@ DT start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		add	r1, r8, r2		@ DT end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		bl	cache_clean_flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
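		/*
		 * Position-independent stack switch: the word at 0: holds
		 * (.L_user_stack_end - .), so adding the runtime address
		 * of 0: (in r0) yields the stack's runtime address.
		 */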
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		adr	r0, 0f			@ switch to our stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		ldr	sp, [r0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		add	sp, sp, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		mov	r5, #0			@ appended DTB size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		mov	r7, #0xFFFFFFFF		@ machine ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		b	wont_overwrite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ENDPROC(efi_enter_kernel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 0:		.long	.L_user_stack_end - .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		.align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		.section ".stack", "aw", %nobits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) .L_user_stack:	.space	4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) .L_user_stack_end: