Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

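@ zero_fp: clear the frame pointer on kernel entry so that a
@ frame-pointer based backtrace terminates cleanly (a no-op unless
@ CONFIG_FRAME_POINTER is enabled).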
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

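@ alignment_trap: \label addresses a word holding the expected value of
@ the CP15 c1 control register (SCTLR); if the live register differs,
@ write the expected value back so the alignment-trap setting is
@ restored.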
	.macro	alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
	mrc	p15, 0, \rtmp2, c1, c0, 0
	ldr	\rtmp1, \label
	ldr	\rtmp1, [\rtmp1]
	teq	\rtmp1, \rtmp2
	mcrne	p15, 0, \rtmp1, c1, c0, 0
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

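	@ reserve the ip..old_r0 part of pt_regs, then push r0-r11 below it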
	sub	sp, #PT_REGS_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR.
	@ The cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

	/*
	 * PENDSV and SVCALL are configured to have the same exception
	 * priorities. As a kernel thread runs at SVCALL execution priority it
	 * can never be preempted and so we will never have to return to a
	 * kernel thread here.
	 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
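	@ exc_ret holds the EXC_RETURN value recorded at exception entry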
	ldr	lr, =exc_ret
	ldr	lr, [lr]

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware if
	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
	@ accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
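	@ \ret_r0 selects whether the live r0 (e.g. a syscall return value)
	@ or the r0 saved in pt_regs goes into the hardware frame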
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #PT_REGS_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm


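	@ svc_exit: pop a pt_regs frame and return to the interrupted
	@ SVC-mode context. \irq != 0 means we arrived via an interrupt,
	@ so IRQs are already disabled.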
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode SVC restore
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	@ Thumb mode SVC restore
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
#endif
	.endm

	@
	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
	@
	@ This macro acts in a similar manner to svc_exit but switches to FIQ
	@ mode to restore the final part of the register state.
	@
	@ We cannot use the normal svc_exit procedure because that would
	@ clobber spsr_svc (FIQ could be delivered during the first few
	@ instructions of vector_swi meaning its contents have not been
	@ saved anywhere).
	@
	@ Note that, unlike svc_exit, this macro also does not allow a caller
	@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
	@ and the handlers cannot call into the scheduler (meaning the value
	@ on the stack remains correct).
	@
	.macro	svc_exit_via_fiq
	uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r0, sp
	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
				@ clobber state restored below)
	msr	cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	add	r8, r0, #S_PC
	ldr	r9, [r0, #S_PSR]
	msr	spsr_cxsf, r9
	ldr	r0, [r0, #S_R0]
	ldmia	r8, {pc}^
#else
	@ Thumb mode restore
	add	r0, sp, #S_R2
	ldr	lr, [sp, #S_LR]
	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
				@ clobber state restored below)
	ldmia	r0, {r2 - r12}
	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
	msr	cpsr_c, r1
	sub	r0, #S_R2
	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm


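	@ restore_user_regs: unwind a pt_regs frame and return to user
	@ space. \fast = 1 leaves r0 untouched (it already holds the
	@ syscall return value); \offset skips any extra words stacked
	@ above the frame, e.g. S_OFF for syscall args 5 and 6.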
	.macro	restore_user_regs, fast = 0, offset = 0
	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
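	@ the writeback (!) leaves r2 pointing at the saved PC slot, so
	@ the ldmdb below picks up r0-lr from the frame just beneath it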
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [r2]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #\offset + PT_REGS_SIZE
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
	@ V7M restore.
	@ Note that we don't need to do clrex here as clearing the local
	@ monitor is part of the exception entry and exit sequence.
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
#else
	@ Thumb mode restore
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
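	@ sp now points at the S_SP slot of the frame, so the ldmdb below
	@ can fetch r0-r12 from just beneath it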
	tst	r1, #PSR_I_BIT | 0x0f
	bne	1f
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #PT_REGS_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
1:	bug	"Returning to usermode but unexpected PSR bits set?", \@
#endif	/* !CONFIG_THUMB2_KERNEL */
	.endm

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
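@ The C functions called below clobber the AAPCS caller-saved registers
@ (r0-r3, ip, lr), so the default \save = 1 preserves them around the
@ call.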
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
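	@ Spectre variant 1 hardening: an out-of-range syscall number is
	@ clamped to 0 (movcs) and the csdb barrier stops the CPU
	@ speculating past the bounds check into the table load below.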
	mov	\tmp, \nr
	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
	movcs	\tmp, #0
	csdb
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
#else
	cmp	\nr, #NR_syscalls		@ check upper syscall limit
	badr	lr, \ret			@ return address
	.if	\reload
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
	stmiacc	sp, {r4, r5}			@ update stack arguments
	.endif
	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info