Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (c) 2012 Linaro Limited.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/irqchip/arm-gic-v3.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <asm/virt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #ifndef ZIMAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * For the kernel proper, we need to find out the CPU boot mode long after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * boot, so we need to store it in a writable variable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * This is not in .bss, because we set it sufficiently early that the boot-time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * zeroing of .bss would clobber it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) .data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	.align	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) ENTRY(__boot_cpu_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	.long	0			@ CPSR mode field of the primary CPU at boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	 * Save the primary CPU boot mode. Requires 3 scratch registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	.macro	store_primary_cpu_mode	reg1, reg2, reg3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	mrs	\reg1, cpsr		@ \reg1 = current PSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	and	\reg1, \reg1, #MODE_MASK	@ keep only the mode bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	adr	\reg2, .L__boot_cpu_mode_offset	@ PC-relative anchor (MMU may be off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	ldr	\reg3, [\reg2]		@ \reg3 = offset from anchor to __boot_cpu_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	str	\reg1, [\reg2, \reg3]	@ __boot_cpu_mode = our boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	 * Compare the current mode with the one saved on the primary CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	 * If they don't match, record that fact. The Z bit indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	 * if there's a match or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	 * Requires 3 additional scratch registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	adr	\reg2, .L__boot_cpu_mode_offset	@ PC-relative anchor (MMU may be off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	ldr	\reg3, [\reg2]		@ \reg3 = offset from anchor to __boot_cpu_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	ldr	\reg1, [\reg2, \reg3]	@ \reg1 = primary CPU's recorded boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	cmp	\mode, \reg1		@ matches primary CPU boot mode?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	orrne	\reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	strne	\reg1, [\reg2, \reg3]	@ record what happened and give up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #else	/* ZIMAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * consistency checking:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	cmp	\mode, \mode		@ always equal: forces Z set (a "match")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #endif /* ZIMAGE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * Hypervisor stub installation functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70)  * These must be called with the MMU and D-cache off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71)  * They are not ABI compliant and are only intended to be called from the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72)  * entry points in head.S.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) @ Call this from the primary CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) ENTRY(__hyp_stub_install)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	store_primary_cpu_mode	r4, r5, r6	@ clobbers r4-r6; leaves boot mode in r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) ENDPROC(__hyp_stub_install)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	@ fall through...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) @ Secondary CPUs should call here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) ENTRY(__hyp_stub_install_secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	mrs	r4, cpsr		@ r4 = current PSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	and	r4, r4, #MODE_MASK	@ keep only this CPU's boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	 * If the secondary has booted with a different mode, give up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	 * immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	compare_cpu_mode_with_primary	r4, r5, r6, r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	retne	lr			@ mismatch already recorded: bail out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	 * Once we have given up on one CPU, we do not try to install the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	 * stub hypervisor on the remaining ones: because the saved boot mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	 * is modified, it can't compare equal to the CPSR mode field any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	 * more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	 * Otherwise...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	cmp	r4, #HYP_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	retne	lr			@ give up if the CPU is not in HYP mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  * Configure HSCTLR to set correct exception endianness/instruction set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  * state etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * Turn off all traps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  * Eventually, CPU-specific code might be needed -- assume not for now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  * This code relies on the "eret" instruction to synchronize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  * various coprocessor accesses. This is done when we switch to SVC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)  * (see safe_svcmode_maskall).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	@ Now install the hypervisor stub:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	W(adr)	r7, __hyp_stub_vectors	@ r7 = stub vector table address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	@ Disable all traps, so we don't get any nasty surprise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	mov	r7, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	mcr	p15, 4, r7, c1, c1, 0	@ HCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	mcr	p15, 4, r7, c1, c1, 3	@ HSTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) ARM_BE8(orr	r7, r7, #(1 << 25))     @ HSCTLR.EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	and	r7, #0x1f		@ Preserve HPMN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	mcr	p15, 4, r7, c1, c1, 1	@ HDCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	@ Make sure NS-SVC is initialised appropriately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	mrc	p15, 0, r7, c1, c0, 0	@ SCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	orr	r7, #(1 << 5)		@ CP15 barriers enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	bic	r7, #(3 << 7)		@ Clear SED/ITD for v8 (RES0 for v7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	bic	r7, #(3 << 19)		@ WXN and UWXN disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	mcr	p15, 0, r7, c1, c0, 0	@ SCTLR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	mrc	p15, 0, r7, c0, c0, 0	@ MIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	mcr	p15, 4, r7, c0, c0, 0	@ VPIDR = MIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	mrc	p15, 0, r7, c0, c0, 5	@ MPIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	mcr	p15, 4, r7, c0, c0, 5	@ VMPIDR = MPIDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	@ make CNTP_* and CNTPCT accessible from PL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	ubfx	r7, r7, #16, #4		@ bits [19:16]: Generic Timer support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	teq	r7, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	beq	1f			@ no arch timer: nothing to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	mov	r7, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	mcrr	p15, 4, r7, r7, c14	@ CNTVOFF = 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	@ Disable virtual timer in case it was counting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	mrc	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	bic	r7, #1			@ Clear ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	mcr	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #ifdef CONFIG_ARM_GIC_V3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	@ Check whether GICv3 system registers are available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	ubfx	r7, r7, #28, #4		@ bits [31:28]: GIC system register support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	teq	r7, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	beq	2f			@ not implemented: skip GIC setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	@ Enable system register accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	mrc	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	orr	r7, r7, #(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	mcr	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	@ SRE bit could be forced to 0 by firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	@ Check whether it sticks before accessing any other sysreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	mrc	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	tst	r7, #ICC_SRE_EL2_SRE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	beq	2f			@ SRE didn't stick: give up on GICv3 sysregs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	mov	r7, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	mcr	p15, 4, r7, c12, c11, 0	@ ICH_HCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	bx	lr			@ The boot CPU mode is left in r4.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) ENDPROC(__hyp_stub_install_secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) __hyp_stub_do_trap:			@ HVC handler; r0 = stub opcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #ifdef ZIMAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 	teq	r0, #HVC_SET_VECTORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	bne	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	/* Only the ZIMAGE stubs can change the HYP vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	mcr	p15, 4, r1, c12, c0, 0	@ set HVBAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	b	__hyp_stub_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 1:	teq	r0, #HVC_SOFT_RESTART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	bne	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	bx	r1			@ branch in HYP to the address in r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 2:	ldr	r0, =HVC_STUB_ERR	@ unknown opcode: return the error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	__ERET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) __hyp_stub_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	mov	r0, #0			@ success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	__ERET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) ENDPROC(__hyp_stub_do_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)  * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)  * and SVC. For the kernel itself, the vectors are set once and for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)  * all by the stubs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) ENTRY(__hyp_set_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	mov	r1, r0			@ r1 = new HYP vector base address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	mov	r0, #HVC_SET_VECTORS	@ r0 = stub opcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	__HVC(0)			@ trap to the HYP stub handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) ENDPROC(__hyp_set_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) ENTRY(__hyp_soft_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	mov	r1, r0			@ r1 = address to branch to in HYP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	mov	r0, #HVC_SOFT_RESTART	@ r0 = stub opcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	__HVC(0)			@ trap to the HYP stub handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	ret	lr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) ENDPROC(__hyp_soft_restart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) #ifndef ZIMAGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) .align 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) .L__boot_cpu_mode_offset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	.long	__boot_cpu_mode - .	@ PC-relative offset: usable with the MMU off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) .align 5				@ 2^5 = 32-byte alignment for the vector table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) ENTRY(__hyp_stub_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) __hyp_stub_reset:	W(b)	.	@ unused vectors spin in place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) __hyp_stub_und:		W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) __hyp_stub_svc:		W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) __hyp_stub_pabort:	W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) __hyp_stub_dabort:	W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) __hyp_stub_trap:	W(b)	__hyp_stub_do_trap	@ HVC from PL1 lands here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) __hyp_stub_irq:		W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) __hyp_stub_fiq:		W(b)	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) ENDPROC(__hyp_stub_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)