Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards.

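Below: arch/arm64/kernel/entry.S, the arm64 low-level exception entry and return code.
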
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	enter_from_user_mode
#endif
	.endm

	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	exit_to_user_mode
#endif
	.endm
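
	/*
	 * enter_from_user_mode/exit_to_user_mode are C helpers (in this
	 * kernel series they live in arch/arm64/kernel/entry-common.c) that
	 * inform context tracking and lockdep/irqflag tracing of the
	 * user<->kernel transition while interrupts are still masked.
	 */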

	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm
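
	/*
	 * clear_gp_regs zeroes x0-x29 on entry from EL0 so that stale
	 * user-controlled register values cannot be consumed by the kernel,
	 * including speculatively. x30 (lr) is handled separately by the
	 * vector/trampoline code above.
	 */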

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm
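
	/*
	 * Each vector entry is at most 128 bytes (.align 7); the .org above
	 * turns an oversized entry into a build-time error. With
	 * CONFIG_UNMAP_KERNEL_AT_EL0, the EL0 entries are also reached via
	 * the KPTI trampoline vectors, which branch past the first
	 * instruction so the x30/tpidrro_el0 cleanup runs.
	 */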

	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm
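
	/*
	 * tramp_alias computes \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text),
	 * i.e. the address of \sym within the fixmap alias of the trampoline
	 * page, which remains mapped while the rest of the kernel is hidden
	 * from EL0 (KPTI).
	 */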

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
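
	/*
	 * apply_ssbd toggles the Spectre-v4 (Speculative Store Bypass)
	 * firmware mitigation through the SMCCC ARCH_WORKAROUND_2 call:
	 * \state=1 enables it on kernel entry, \state=0 restores the task's
	 * setting on return to EL0. Threads with TIF_SSBD set keep the
	 * mitigation permanently enabled, so the per-entry toggle is skipped.
	 */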

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm
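
	/*
	 * In asynchronous MTE modes the hardware records EL0 tag check
	 * faults in TFSRE0_EL1 instead of raising a synchronous exception;
	 * the fault is latched into _TIF_MTE_ASYNC_FAULT here (an LSE stset,
	 * so no lock is needed) and reported to the task on the exit path.
	 */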

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm
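
	/*
	 * GCR_EL1 controls which tags the IRG instruction may generate.
	 * With in-kernel MTE (KASAN_HW_TAGS) the kernel and user exclusion
	 * masks differ, so GCR_EL1 is swapped on every kernel entry/exit;
	 * the user mask is cached in the task's THREAD_MTE_CTRL field.
	 */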

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load tsk, x20
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif

	/* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk, x0

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm
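
	/*
	 * The sb (speculation barrier) after eret is never architecturally
	 * executed; it stops the CPU from speculating past the exception
	 * return into whatever follows in the text section.
	 */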

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif
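
	/*
	 * CONFIG_ARM64_SW_TTBR0_PAN emulates Privileged Access Never on CPUs
	 * without the ARMv8.1 PAN feature: while in the kernel, TTBR0_EL1 is
	 * pointed at a reserved (empty) table so any stray access through a
	 * user virtual address faults; the uaccess helpers temporarily
	 * restore the real TTBR0 around legitimate user accesses.
	 */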

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	x24, scs_sp		// preserve the original shadow stack
#endif

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

#ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif

9998:
	.endm
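
	/*
	 * The eor/and pair is a branchless "same stack?" test: the xor
	 * cancels the bits the two addresses share, and masking off the low
	 * THREAD_SIZE bits leaves zero exactly when sp lies within the task
	 * stack. A non-zero result means we are already off the task stack
	 * (e.g. an exception taken while on the irq stack) and must not
	 * switch again.
	 */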

	/*
	 * The callee-saved regs (x19-x29) should be preserved between
	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
	 * uses x20-x23 to store data for later use.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	scs_sp, x24
#endif
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler, handler:req
	ldr_l	x1, \handler
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
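
	/*
	 * \handler names a function-pointer variable (handle_arch_irq in the
	 * callers below); its value is loaded before the stack switch and
	 * invoked on the irq stack with x0 = pt_regs.
	 */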

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif
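
	/*
	 * With priority masking, "irqs unmasked" means PMR == GIC_PRIO_IRQON,
	 * so the subtraction yields exactly 0 in that case. Without the
	 * feature, PSTATE.I is authoritative and the macro always reports
	 * "unmasked".
	 */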

	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm
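
	/*
	 * Pseudo-NMIs rely on GIC priority masking: normal IRQs are masked
	 * by raising PMR rather than setting PSTATE.I, which leaves
	 * NMI-priority interrupts deliverable even in "irqs off" regions.
	 */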

	.macro el1_interrupt_handler, handler:req
	enable_da_f

	mov	x0, sp
	bl	enter_el1_irq_or_nmi

	irq_handler	\handler

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we come back from an NMI, so skip preemption
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

	mov	x0, sp
	bl	exit_el1_irq_or_nmi
	.endm

	.macro el0_interrupt_handler, handler:req
	user_exit_irqoff
	enable_da_f

	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
	irq_handler	\handler
	.endm
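
	/*
	 * x22 still holds the aborted PC from kernel_entry. Bit 55 selects
	 * between the TTBR0 (user) and TTBR1 (kernel) halves of the address
	 * space, so an EL0 exception with a kernel-half PC is suspicious and
	 * branch-predictor hardening is applied before handling it.
	 */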

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
SYM_CODE_END(vectors)
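
/*
 * VBAR_EL1 requires 2KB alignment (.align 11). The table has 16 entries of
 * 128 bytes each, in the architectural groups: current EL with SP_EL0
 * (EL1t), current EL with SP_ELx (EL1h), lower EL using AArch64, and lower
 * EL using AArch32.
 */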

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

SYM_CODE_START_LOCAL(el0_sync_invalid)
	inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)

SYM_CODE_START_LOCAL(el0_irq_invalid)
	inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid)
	inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)

SYM_CODE_START_LOCAL(el0_error_invalid)
	inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)

#ifdef CONFIG_COMPAT
SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
	inv_entry 0, BAD_FIQ, 32
SYM_CODE_END(el0_fiq_invalid_compat)
#endif

SYM_CODE_START_LOCAL(el1_sync_invalid)
	inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)

SYM_CODE_START_LOCAL(el1_irq_invalid)
	inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)

SYM_CODE_START_LOCAL(el1_fiq_invalid)
	inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)

SYM_CODE_START_LOCAL(el1_error_invalid)
	inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	kernel_entry 1
	el1_interrupt_handler handle_arch_irq
	kernel_exit 1
SYM_CODE_END(el1_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * EL0 mode handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	.align	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	kernel_entry 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	mov	x0, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	bl	el0_sync_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	b	ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) SYM_CODE_END(el0_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	.align	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	kernel_entry 0, 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	mov	x0, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	bl	el0_sync_compat_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	b	ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) SYM_CODE_END(el0_sync_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	.align	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	kernel_entry 0, 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	b	el0_irq_naked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) SYM_CODE_END(el0_irq_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	kernel_entry 0, 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	b	el0_error_naked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) SYM_CODE_END(el0_error_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	.align	6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	kernel_entry 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) el0_irq_naked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	el0_interrupt_handler handle_arch_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	b	ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) SYM_CODE_END(el0_irq)
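/*
 * Note that el0_irq_naked is also the continuation of el0_irq_compat
 * above: the 64-bit and 32-bit paths share the same interrupt dispatch
 * and both leave through ret_to_user; only the kernel_entry regsize
 * differs.
 */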
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) SYM_CODE_START_LOCAL(el1_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	kernel_entry 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	mrs	x1, esr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	enable_dbg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	mov	x0, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	bl	do_serror
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	kernel_exit 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) SYM_CODE_END(el1_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) SYM_CODE_START_LOCAL(el0_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	kernel_entry 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) el0_error_naked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	mrs	x25, esr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	user_exit_irqoff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	enable_dbg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	mov	x0, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	mov	x1, x25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	bl	do_serror
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	enable_da_f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	b	ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) SYM_CODE_END(el0_error)
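/*
 * ESR_EL1 is staged in x25 (callee-saved) rather than x1 because
 * user_exit_irqoff may make C calls that clobber the temporary
 * registers; it is moved into x1 only immediately before do_serror().
 */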
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  * "slow" syscall return path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) SYM_CODE_START_LOCAL(ret_to_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	disable_daif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	gic_prio_kentry_setup tmp=x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	bl	trace_hardirqs_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	ldr	x19, [tsk, #TSK_TI_FLAGS]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	and	x2, x19, #_TIF_WORK_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	cbnz	x2, work_pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) finish_ret_to_user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	user_enter_irqoff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	enable_step_tsk x19, x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	bl	stackleak_erase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	kernel_exit 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * Ok, we need to do extra processing; enter the slow path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) work_pending:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	mov	x0, sp				// 'regs'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	mov	x1, x19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	bl	do_notify_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	b	finish_ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) SYM_CODE_END(ret_to_user)
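/*
 * _TIF_WORK_MASK is the set of thread flags that force the slow path; on
 * arm64 5.10 it includes at least _TIF_SIGPENDING, _TIF_NEED_RESCHED,
 * _TIF_NOTIFY_RESUME and _TIF_FOREIGN_FPSTATE (see <asm/thread_info.h>).
 * do_notify_resume() loops on the flags itself, but they are re-read here
 * as well so that enable_step_tsk sees an up-to-date _TIF_SINGLESTEP.
 */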
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	.popsection				// .entry.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	// Move from tramp_pg_dir to swapper_pg_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	.macro tramp_map_kernel, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	mrs	\tmp, ttbr1_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	add	\tmp, \tmp, #(2 * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	bic	\tmp, \tmp, #USER_ASID_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	msr	ttbr1_el1, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* ASID already in \tmp[63:48] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	/* 2MB boundary containing the vectors, so we nobble the walk cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	tlbi	vae1, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	dsb	nsh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) alternative_else_nop_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	.endm
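/*
 * The arithmetic above relies on the linker script placing swapper_pg_dir
 * two pages above tramp_pg_dir, so the swap needs no literal load.
 * USER_ASID_FLAG is bit 48 of TTBR1, i.e. bit 0 of the ASID field: under
 * KPTI each task owns a pair of ASIDs, the odd one covering its user page
 * table, so the bic (and the orr in tramp_unmap_kernel below) switches
 * ASID and page table in a single register write.
 */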
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	// Move from swapper_pg_dir to tramp_pg_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	.macro tramp_unmap_kernel, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	mrs	\tmp, ttbr1_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	orr	\tmp, \tmp, #USER_ASID_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	msr	ttbr1_el1, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 * We avoid running the post_ttbr_update_workaround here because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 * it's only needed by Cavium ThunderX, which requires KPTI to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 * disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	.macro tramp_data_page	dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	adr_l	\dst, .entry.tramp.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	sub	\dst, \dst, PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	.endm
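/*
 * adr_l is PC-relative, so when this runs from the TRAMP_VALIAS alias it
 * yields the alias address of .entry.tramp.text; the trampoline data page
 * is mapped in the fixmap directly below the trampoline text, hence the
 * PAGE_SIZE subtraction.
 */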
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	.macro tramp_data_read_var	dst, var
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) #ifdef CONFIG_RANDOMIZE_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	tramp_data_page		\dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ldr	\dst, [\dst]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	ldr	\dst, =\var
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	.endm
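/*
 * With KASLR, keeping kernel pointers as literals in the trampoline text
 * (which sits at a fixed, well-known virtual address) could reveal the
 * randomized kernel layout, so each variable is instead read through a
 * __entry_tramp_data_<var> slot in the trampoline data page (populated
 * under CONFIG_RANDOMIZE_BASE below). Without KASLR a plain link-time
 * literal is fine.
 */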
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #define BHB_MITIGATION_NONE	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) #define BHB_MITIGATION_LOOP	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) #define BHB_MITIGATION_FW	2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) #define BHB_MITIGATION_INSN	3
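/*
 * Spectre-BHB mitigation flavours, selected per CPU at boot:
 *   LOOP - run __mitigate_spectre_bhb_loop, a CPU-specific number of taken
 *          branches that pads out the branch history buffer;
 *   FW   - call into firmware (SMCCC ARCH_WORKAROUND_3), once the stack is
 *          mapped;
 *   INSN - execute the clearbhb hint instruction;
 *   NONE - plain trampoline with no extra sequence.
 */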
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	.macro tramp_ventry, vector_start, regsize, kpti, bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	.align	7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	.if	\regsize == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	msr	tpidrro_el0, x30	// Restored in kernel_ventry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	.if	\bhb == BHB_MITIGATION_LOOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * This sequence must appear before the first indirect branch, i.e. the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * ret out of tramp_ventry. It appears here because x30 is free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	__mitigate_spectre_bhb_loop	x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	.endif // \bhb == BHB_MITIGATION_LOOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	.if	\bhb == BHB_MITIGATION_INSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	clearbhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	.endif // \bhb == BHB_MITIGATION_INSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	.if	\kpti == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	 * Defend against branch aliasing attacks by pushing a dummy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * entry onto the return stack and using a RET instruction to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * enter the full-fat kernel vectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	bl	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	b	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	tramp_map_kernel	x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	tramp_data_read_var	x30, vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	prfm	plil1strm, [x30, #(1b - \vector_start)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) alternative_else_nop_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	msr	vbar_el1, x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	ldr	x30, =vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	.endif // \kpti == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	.if	\bhb == BHB_MITIGATION_FW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 * The firmware sequence must appear before the first indirect branch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	 * i.e. the ret out of tramp_ventry, but it also needs the stack to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	 * mapped to save/restore the registers the SMC clobbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	__mitigate_spectre_bhb_fw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	.endif // \bhb == BHB_MITIGATION_FW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	add	x30, x30, #(1b - \vector_start + 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) .org 1b + 128	// Did we overflow the ventry slot?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	.endm
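/*
 * The "+ 4" above skips the first instruction of the matching slot in the
 * real vectors: in this tree that instruction is a branch around the
 * tpidrro_el0/x30 cleanup, so the cleanup only runs when the slot is
 * entered through the trampoline. The .org directive turns an oversized
 * mitigation sequence into a build failure rather than a silently
 * corrupted 128-byte vector slot.
 */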
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	.macro tramp_exit, regsize = 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	tramp_data_read_var	x30, this_cpu_vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	this_cpu_offset x29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	ldr	x30, [x30, x29]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	msr	vbar_el1, x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	ldr	lr, [sp, #S_LR]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	tramp_unmap_kernel	x29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	.if	\regsize == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	mrs	x29, far_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	add	sp, sp, #S_FRAME_SIZE		// restore sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	eret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	sb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	.endm
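/*
 * On this path kernel_exit 0 has already restored everything except x29,
 * x30 and sp: lr is reloaded from the pt_regs frame still under sp, x29
 * was stashed in far_el1 (64-bit tasks only; FAR_EL1 is free to clobber
 * on the way back to userspace), and this_cpu_vector supplies the per-CPU
 * vector flavour so the next exception takes the mitigated entry.
 */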
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	.macro	generate_tramp_vector,	kpti, bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .Lvector_start\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	.space	0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	.rept	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	.rept	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	.endm
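/*
 * A vector table is 16 slots of 128 bytes. The trampoline is only ever
 * entered from EL0, so the first eight (current-EL) slots are dead space
 * covered by .space 0x400; the remaining eight are the four lower-EL
 * AArch64 entries followed by the four lower-EL AArch32 entries.
 */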
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * Exception vectors trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * The order must match __bp_harden_el1_vectors and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * arm64_bp_harden_el1_vectors enum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	.pushsection ".entry.tramp.text", "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	.align	11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) SYM_CODE_START_NOALIGN(tramp_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) SYM_CODE_END(tramp_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) SYM_CODE_START(tramp_exit_native)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	tramp_exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) SYM_CODE_END(tramp_exit_native)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) SYM_CODE_START(tramp_exit_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	tramp_exit	32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) SYM_CODE_END(tramp_exit_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	.ltorg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	.popsection				// .entry.tramp.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #ifdef CONFIG_RANDOMIZE_BASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	.pushsection ".rodata", "a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	.align PAGE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) SYM_DATA_START(__entry_tramp_data_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) __entry_tramp_data_vectors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	.quad	vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #ifdef CONFIG_ARM_SDE_INTERFACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) __entry_tramp_data___sdei_asm_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	.quad	__sdei_asm_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #endif /* CONFIG_ARM_SDE_INTERFACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) __entry_tramp_data_this_cpu_vector:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	.quad	this_cpu_vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) SYM_DATA_END(__entry_tramp_data_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	.popsection				// .rodata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif /* CONFIG_RANDOMIZE_BASE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * Exception vectors for spectre mitigations on entry from EL1 when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * kpti is not in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	.macro generate_el1_vector, bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) .Lvector_start\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	kernel_ventry	1, sync_invalid			// Synchronous EL1t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	kernel_ventry	1, irq_invalid			// IRQ EL1t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	kernel_ventry	1, fiq_invalid			// FIQ EL1t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	kernel_ventry	1, error_invalid		// Error EL1t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	kernel_ventry	1, sync				// Synchronous EL1h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	kernel_ventry	1, irq				// IRQ EL1h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	kernel_ventry	1, fiq_invalid			// FIQ EL1h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	kernel_ventry	1, error			// Error EL1h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	.rept	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	.rept 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	.endm
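/*
 * Here the current-EL half is populated with the real kernel_ventry
 * handlers, while the lower-EL half reuses tramp_ventry with kpti=0:
 * EL0 entries run the BHB sequence and then branch into the matching
 * slot of the regular vectors via ldr x30, =vectors.
 */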
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	.pushsection ".entry.text", "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	.align	11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) SYM_CODE_START(__bp_harden_el1_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	generate_el1_vector	bhb=BHB_MITIGATION_FW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	generate_el1_vector	bhb=BHB_MITIGATION_INSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) SYM_CODE_END(__bp_harden_el1_vectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	.popsection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * Register switch for AArch64. The callee-saved registers need to be saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * and restored. On entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  *   x0 = previous task_struct (must be preserved across the switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  *   x1 = next task_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * Previous and next are guaranteed not to be the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) SYM_FUNC_START(cpu_switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	mov	x10, #THREAD_CPU_CONTEXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	add	x8, x0, x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	mov	x9, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	stp	x19, x20, [x8], #16		// store callee-saved registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	stp	x21, x22, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	stp	x23, x24, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	stp	x25, x26, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	stp	x27, x28, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	stp	x29, x9, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	str	lr, [x8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	add	x8, x1, x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	ldp	x19, x20, [x8], #16		// restore callee-saved registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	ldp	x21, x22, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	ldp	x23, x24, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	ldp	x25, x26, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	ldp	x27, x28, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	ldp	x29, x9, [x8], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	ldr	lr, [x8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	mov	sp, x9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	msr	sp_el0, x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	ptrauth_keys_install_kernel x1, x8, x9, x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	scs_save x0, x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	scs_load x1, x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	ret
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) SYM_FUNC_END(cpu_switch_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) NOKPROBE(cpu_switch_to)
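/*
 * The stores/loads above walk struct cpu_context (see <asm/processor.h>):
 * x19..x28, fp, sp, pc, in that order, starting at THREAD_CPU_CONTEXT
 * within task_struct. "pc" is the saved lr, so the final ret resumes the
 * next task wherever it last called cpu_switch_to (or at ret_from_fork
 * for a new task). sp_el0 doubles as the current task_struct pointer
 * while in the kernel, which is why it is updated to x1 here.
 */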
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * This is how we return from a fork.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) SYM_CODE_START(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	bl	schedule_tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	cbz	x19, 1f				// not a kernel thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	mov	x0, x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	blr	x19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 1:	get_current_task tsk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	b	ret_to_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) SYM_CODE_END(ret_from_fork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) NOKPROBE(ret_from_fork)
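/*
 * copy_thread() points the new task's saved pc at ret_from_fork and, for
 * kernel threads, stashes the thread function in x19 and its argument in
 * x20 of the saved cpu_context; cbz x19 therefore distinguishes the fork
 * of a user task (x19 == 0) from a freshly created kernel thread.
 */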
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) #ifdef CONFIG_ARM_SDE_INTERFACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #include <asm/sdei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) #include <uapi/linux/arm_sdei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) .macro sdei_handler_exit exit_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	/* On success, this call never returns... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	cmp	\exit_mode, #SDEI_EXIT_SMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	b.ne	99f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	smc	#0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	b	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 99:	hvc	#0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	b	.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) .endm
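/*
 * The exit call is made on whichever conduit (SMC or HVC) the SDEI
 * firmware was probed with, as recorded in sdei_exit_mode; the "b ."
 * after each conduit instruction catches a buggy firmware that returns
 * from COMPLETE/COMPLETE_AND_RESUME.
 */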
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)  * The regular SDEI entry point may have been unmapped along with the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)  * the kernel. This trampoline restores the kernel mapping to make the x1 memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)  * argument accessible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  * This clobbers x4; __sdei_handler() will restore it from firmware's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) .ltorg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) .pushsection ".entry.tramp.text", "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) SYM_CODE_START(__sdei_asm_entry_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	mrs	x4, ttbr1_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	tbz	x4, #USER_ASID_BIT, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	tramp_map_kernel tmp=x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	mov	x4, xzr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	 * the kernel on exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	tramp_data_read_var     x4, __sdei_asm_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	br	x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) SYM_CODE_END(__sdei_asm_entry_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) NOKPROBE(__sdei_asm_entry_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * Make the exit call and restore the original ttbr1_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * x0 & x1: setup for the exit API call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  * x2: exit_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  * x4: struct sdei_registered_event argument from registration time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) SYM_CODE_START(__sdei_asm_exit_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	cbnz	x4, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	tramp_unmap_kernel	tmp=x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 1:	sdei_handler_exit exit_mode=x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) SYM_CODE_END(__sdei_asm_exit_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) NOKPROBE(__sdei_asm_exit_trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	.ltorg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .popsection		// .entry.tramp.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  * Software Delegated Exception entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * x0: Event number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * x1: struct sdei_registered_event argument from registration time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * x2: interrupted PC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * x3: interrupted PSTATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  * x4: maybe clobbered by the trampoline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * Firmware has preserved x0-x17 for us; we must save/restore the rest to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  * follow SMC-CC. We save (or retrieve) all the registers as the handler may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  * want them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) SYM_CODE_START(__sdei_asm_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	mov	x4, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	mov	x19, x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #ifdef CONFIG_VMAP_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	 * entry.S may have been using sp as a scratch register; find whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	 * this is a normal or critical event and switch to the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	 * stack for this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	cbnz	w4, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	b	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 2:	mov	x6, #SDEI_STACK_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	add	x5, x5, x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	mov	sp, x5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) #ifdef CONFIG_SHADOW_CALL_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	/* Use a separate shadow call stack for normal and critical events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	cbnz	w4, 3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	b	4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 * We may have interrupted userspace, or a guest, or exit-from or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	 * return-to either of these. We can't trust sp_el0, so restore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	mrs	x28, sp_el0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	msr	sp_el0, x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/* If we interrupted the kernel, point to the previous stack/frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	and     x0, x3, #0xc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	mrs     x1, CurrentEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	cmp     x0, x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	csel	x29, x29, xzr, eq	// fp, or zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	csel	x4, x2, xzr, eq		// elr, or zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	stp	x29, x4, [sp, #-16]!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	mov	x29, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	add	x0, x19, #SDEI_EVENT_INTREGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	mov	x1, x19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	bl	__sdei_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	msr	sp_el0, x28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	/* restore the regs above x17 that we clobbered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	mov	sp, x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	mov	x1, x0			// address to complete_and_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	cmp	x0, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	csel	x0, x2, x3, ls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	ldr_l	x2, sdei_exit_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	sdei_handler_exit exit_mode=x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) alternative_else_nop_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	br	x5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) SYM_CODE_END(__sdei_asm_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) NOKPROBE(__sdei_asm_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #endif /* CONFIG_ARM_SDE_INTERFACE */