Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 1996-2000 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2012 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #error "Only include this from assembly code"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #ifndef __ASM_ASSEMBLER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #define __ASM_ASSEMBLER_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm-generic/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/asm-bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/alternative.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/cpufeature.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/debug-monitors.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/pgtable-hwdef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	 * Provide a wxN alias for each wN register so what we can paste a xN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	 * reference after a 'w' to obtain the 32-bit version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	wx\n	.req	w\n
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	.macro save_and_disable_daif, flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	mrs	\flags, daif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	msr	daifset, #0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	.macro disable_daif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	msr	daifset, #0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	.macro enable_daif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	msr	daifclr, #0xf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	.macro	restore_daif, flags:req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	msr	daif, \flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	.macro enable_da_f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	msr	daifclr, #(8 | 4 | 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * Save/restore interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	.macro	save_and_disable_irq, flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	mrs	\flags, daif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	msr	daifset, #2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	.macro	restore_irq, flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	msr	daif, \flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	.macro	enable_dbg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	msr	daifclr, #8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	.macro	disable_step_tsk, flgs, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	mrs	\tmp, mdscr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	bic	\tmp, \tmp, #DBG_MDSCR_SS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	msr	mdscr_el1, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	isb	// Synchronise with enable_dbg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 9990:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	/* call with daif masked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	.macro	enable_step_tsk, flgs, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	mrs	\tmp, mdscr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	orr	\tmp, \tmp, #DBG_MDSCR_SS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	msr	mdscr_el1, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 9990:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * RAS Error Synchronization barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	.macro  esb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #ifdef CONFIG_ARM64_RAS_EXTN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	hint    #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  * Value prediction barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	.macro	csdb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	hint	#20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)  * Clear Branch History instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	.macro clearbhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	hint	#22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)  * Speculation barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	.macro	sb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) alternative_if_not ARM64_HAS_SB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	dsb	nsh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) alternative_else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	SB_BARRIER_INSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) alternative_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  * NOP sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	.macro	nops, num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	.rept	\num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	.endr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * Emit an entry into the exception table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	.macro		_asm_extable, from, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	.pushsection	__ex_table, "a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	.align		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	.long		(\from - .), (\to - .)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	.popsection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) #define USER(l, x...)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 9999:	x;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	_asm_extable	9999b, l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  * Register aliases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) lr	.req	x30		// link register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * Vector entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	 .macro	ventry	label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	.align	7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	b	\label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  * Select code when configured for BE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define CPU_BE(code...) code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #define CPU_BE(code...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)  * Select code when configured for LE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #ifdef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #define CPU_LE(code...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #define CPU_LE(code...) code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  * Define a macro that constructs a 64-bit value by concatenating two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  * 32-bit registers. Note that on big endian systems the order of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  * registers is swapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #ifndef CONFIG_CPU_BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	.macro	regs_to_64, rd, lbits, hbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	.macro	regs_to_64, rd, hbits, lbits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	orr	\rd, \lbits, \hbits, lsl #32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * <symbol> is within the range +/- 4 GB of the PC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	 * @dst: destination register (64 bit wide)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	 * @sym: name of the symbol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	.macro	adr_l, dst, sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	adrp	\dst, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	add	\dst, \dst, :lo12:\sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	 * @dst: destination register (32 or 64 bit wide)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	 * @sym: name of the symbol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	 *       32-bit wide register, in which case it cannot be used to hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 	 *       the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	.macro	ldr_l, dst, sym, tmp=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	.ifb	\tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	adrp	\dst, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	ldr	\dst, [\dst, :lo12:\sym]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	adrp	\tmp, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	ldr	\dst, [\tmp, :lo12:\sym]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	 * @src: source register (32 or 64 bit wide)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	 * @sym: name of the symbol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	 * @tmp: mandatory 64-bit scratch register to calculate the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	 *       while <src> needs to be preserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	.macro	str_l, src, sym, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	adrp	\tmp, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	str	\src, [\tmp, :lo12:\sym]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	 * @dst: destination register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	.macro	this_cpu_offset, dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	mrs	\dst, tpidr_el2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	.macro	this_cpu_offset, dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	mrs	\dst, tpidr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) alternative_else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	mrs	\dst, tpidr_el2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) alternative_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	 * @sym: The name of the per-cpu variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	 * @tmp: scratch register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	.macro adr_this_cpu, dst, sym, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	adrp	\tmp, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	add	\dst, \tmp, #:lo12:\sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	this_cpu_offset \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	add	\dst, \dst, \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	 * @sym: The name of the per-cpu variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 	 * @tmp: scratch register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	.macro ldr_this_cpu dst, sym, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	adr_l	\dst, \sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	this_cpu_offset \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	ldr	\dst, [\dst, \tmp]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)  * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	.macro	vma_vm_mm, rd, rn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	ldr	\rd, [\rn, #VMA_VM_MM]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)  * read_ctr - read CTR_EL0. If the system has mismatched register fields,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)  * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	.macro	read_ctr, reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) #ifndef __KVM_NVHE_HYPERVISOR__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	mrs	\reg, ctr_el0			// read CTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) alternative_else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) alternative_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) alternative_if_not ARM64_KVM_PROTECTED_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	ASM_BUG()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) alternative_else_nop_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) alternative_cb kvm_compute_final_ctr_el0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	movz	\reg, #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	movk	\reg, #0, lsl #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	movk	\reg, #0, lsl #32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	movk	\reg, #0, lsl #48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) alternative_cb_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)  * raw_dcache_line_size - get the minimum D-cache line size on this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)  * from the CTR register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	.macro	raw_dcache_line_size, reg, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	mrs	\tmp, ctr_el0			// read CTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	mov	\reg, #4			// bytes per word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	lsl	\reg, \reg, \tmp		// actual cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * dcache_line_size - get the safe D-cache line size across all CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	.macro	dcache_line_size, reg, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	read_ctr	\tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	mov		\reg, #4		// bytes per word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	lsl		\reg, \reg, \tmp	// actual cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)  * raw_icache_line_size - get the minimum I-cache line size on this CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)  * from the CTR register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	.macro	raw_icache_line_size, reg, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	mrs	\tmp, ctr_el0			// read CTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	and	\tmp, \tmp, #0xf		// cache line size encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	mov	\reg, #4			// bytes per word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	lsl	\reg, \reg, \tmp		// actual cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)  * icache_line_size - get the safe I-cache line size across all CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	.macro	icache_line_size, reg, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	read_ctr	\tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	and		\tmp, \tmp, #0xf	// cache line size encoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	mov		\reg, #4		// bytes per word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	lsl		\reg, \reg, \tmp	// actual cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)  * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	.macro	tcr_set_t0sz, valreg, t0sz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)  * tcr_set_t1sz - update TCR.T1SZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	.macro	tcr_set_t1sz, valreg, t1sz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)  * ID_AA64MMFR0_EL1.PARange value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)  *	tcr:		register with the TCR_ELx value to be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)  *	pos:		IPS or PS bitfield position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)  *	tmp{0,1}:	temporary registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	mrs	\tmp0, ID_AA64MMFR0_EL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 	// Narrow PARange to fit the PS field in TCR_ELx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	cmp	\tmp0, \tmp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	csel	\tmp0, \tmp1, \tmp0, hi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	bfi	\tcr, \tmp0, \pos, #3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)  * Macro to perform a data cache maintenance for the interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)  * [kaddr, kaddr + size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)  * 	op:		operation passed to dc instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  * 	domain:		domain used in dsb instruciton
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)  * 	kaddr:		starting virtual address of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)  * 	size:		size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  * 	Corrupts:	kaddr, size, tmp1, tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 	.macro __dcache_op_workaround_clean_cache, op, kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 	dc	\op, \kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) alternative_else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 	dc	civac, \kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) alternative_endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	dcache_line_size \tmp1, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	add	\size, \kaddr, \size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	sub	\tmp2, \tmp1, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	bic	\kaddr, \kaddr, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 9998:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	.ifc	\op, cvau
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	__dcache_op_workaround_clean_cache \op, \kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	.ifc	\op, cvac
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	__dcache_op_workaround_clean_cache \op, \kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	.ifc	\op, cvap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 	sys	3, c7, c12, 1, \kaddr	// dc cvap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	.ifc	\op, cvadp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	sys	3, c7, c13, 1, \kaddr	// dc cvadp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	dc	\op, \kaddr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	add	\kaddr, \kaddr, \tmp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	cmp	\kaddr, \size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	b.lo	9998b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	dsb	\domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)  * Macro to perform an instruction cache maintenance for the interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)  * [start, end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)  * 	start, end:	virtual addresses describing the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)  *	label:		A label to branch to on user fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)  * 	Corrupts:	tmp1, tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	icache_line_size \tmp1, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	sub	\tmp2, \tmp1, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	bic	\tmp2, \start, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 9997:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	add	\tmp2, \tmp2, \tmp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	cmp	\tmp2, \end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	b.lo	9997b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	dsb	ish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	.macro	reset_pmuserenr_el0, tmpreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	mrs	\tmpreg, id_aa64dfr0_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	cmp	\tmpreg, #1			// Skip if no PMU present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	b.lt	9000f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 9000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)  * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	.macro	reset_amuserenr_el0, tmpreg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) .Lskip_\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)  * copy_page - copy src to dest using temp registers t1-t8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 9998:	ldp	\t1, \t2, [\src]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	ldp	\t3, \t4, [\src, #16]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	ldp	\t5, \t6, [\src, #32]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	ldp	\t7, \t8, [\src, #48]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	add	\src, \src, #64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	stnp	\t1, \t2, [\dest]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	stnp	\t3, \t4, [\dest, #16]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	stnp	\t5, \t6, [\dest, #32]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	stnp	\t7, \t8, [\dest, #48]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	add	\dest, \dest, #64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	tst	\src, #(PAGE_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	b.ne	9998b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)  * Annotate a function as being unsuitable for kprobes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) #ifdef CONFIG_KPROBES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) #define NOKPROBE(x)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	.pushsection "_kprobe_blacklist", "aw";	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	.quad	x;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	.popsection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) #define NOKPROBE(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) #define EXPORT_SYMBOL_NOKASAN(name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	 * Emit a 64-bit absolute little endian symbol reference in a way that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	 * ensures that it will be resolved at build time, even when building a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	 * PIE binary. This requires cooperation from the linker script, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	 * must emit the lo32/hi32 halves individually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	.macro	le64sym, sym
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	.long	\sym\()_lo32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	.long	\sym\()_hi32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	 * mov_q - move an immediate constant into a 64-bit register using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	 *         between 2 and 4 movz/movk instructions (depending on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	 *         magnitude and sign of the operand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	.macro	mov_q, reg, val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	movz	\reg, :abs_g1_s:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	movz	\reg, :abs_g2_s:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	.else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	movz	\reg, :abs_g3:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	movk	\reg, :abs_g2_nc:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	movk	\reg, :abs_g1_nc:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	movk	\reg, :abs_g0_nc:\val
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)  * Return the current task_struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	.macro	get_current_task, rd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	mrs	\rd, sp_el0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)  * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)  * orr is used as it can cover the immediate value (and is idempotent).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)  * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)  * 	ttbr: Value of ttbr to set, modified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	.macro	offset_ttbr1, ttbr, tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) #ifdef CONFIG_ARM64_VA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	cbnz	\tmp, .Lskipoffs_\@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) .Lskipoffs_\@ :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)  * Perform the reverse of offset_ttbr1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)  * bic is used as it can cover the immediate value and, in future, won't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)  * to be nop'ed out when dealing with 52-bit kernel VAs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	.macro	restore_ttbr1, ttbr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) #ifdef CONFIG_ARM64_VA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  * Arrange a physical address in a TTBR register, taking care of 52-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)  * addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)  * 	phys:	physical address, preserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)  * 	ttbr:	returns the TTBR value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	.macro	phys_to_ttbr, ttbr, phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) #ifdef CONFIG_ARM64_PA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	orr	\ttbr, \phys, \phys, lsr #46
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	mov	\ttbr, \phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	.macro	phys_to_pte, pte, phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #ifdef CONFIG_ARM64_PA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	 * We assume \phys is 64K aligned and this is guaranteed by only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	 * supporting this configuration with 64K pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	orr	\pte, \phys, \phys, lsr #36
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	and	\pte, \pte, #PTE_ADDR_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	mov	\pte, \phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	.macro	pte_to_phys, phys, pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) #ifdef CONFIG_ARM64_PA_BITS_52
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	bfxil	\phys, \pte, #16, #32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	lsl	\phys, \phys, #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	and	\phys, \pte, #PTE_ADDR_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)  * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) #ifdef CONFIG_FUJITSU_ERRATUM_010001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	mrs	\tmp1, midr_el1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	and	\tmp1, \tmp1, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	cmp	\tmp1, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	b.ne	10f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	bic	\tcr, \tcr, \tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) #endif /* CONFIG_FUJITSU_ERRATUM_010001 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)  * Errata workaround prior to disable MMU. Insert an ISB immediately prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)  * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	.macro pre_disable_mmu_workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) #ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	 * frame_push - Push @regcount callee saved registers to the stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	 *              starting at x19, as well as x29/x30, and set x29 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	 *              the new value of sp. Add @extra bytes of stack space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	 *              for locals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	.macro		frame_push, regcount:req, extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	__frame		st, \regcount, \extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	 * frame_pop  - Pop the callee saved registers from the stack that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	 *              pushed in the most recent call to frame_push, as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	 *              as x29/x30 and any extra stack space that may have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	 *              allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	.macro		frame_pop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	__frame		ld
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	.macro		__frame_regs, reg1, reg2, op, num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	.if		.Lframe_regcount == \num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	\op\()r		\reg1, [sp, #(\num + 1) * 8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	.elseif		.Lframe_regcount > \num
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	.macro		__frame, op, regcount, extra=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	.ifc		\op, st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	.if		(\regcount) < 0 || (\regcount) > 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	.error		"regcount should be in the range [0 ... 10]"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	.if		((\extra) % 16) != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	.error		"extra should be a multiple of 16 bytes"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	.ifdef		.Lframe_regcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	.if		.Lframe_regcount != -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	.error		"frame_push/frame_pop may not be nested"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	.set		.Lframe_regcount, \regcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	.set		.Lframe_extra, \extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	mov		x29, sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	__frame_regs	x19, x20, \op, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	__frame_regs	x21, x22, \op, 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	__frame_regs	x23, x24, \op, 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	__frame_regs	x25, x26, \op, 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	__frame_regs	x27, x28, \op, 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	.ifc		\op, ld
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	.if		.Lframe_regcount == -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	.error		"frame_push/frame_pop may not be nested"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	.set		.Lframe_regcount, -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	.endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)  * Set SCTLR_ELx to the @reg value, and invalidate the local icache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)  * in the process. This is called when setting the MMU on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) .macro set_sctlr, sreg, reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	msr	\sreg, \reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	 * Invalidate the local I-cache so that any instructions fetched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	 * speculatively from the PoC are discarded, since they may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	 * been dynamically patched at the PoU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	ic	iallu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	dsb	nsh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) .macro set_sctlr_el1, reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	set_sctlr sctlr_el1, \reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) .macro set_sctlr_el2, reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	set_sctlr sctlr_el2, \reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	 * Check whether preempt/bh-disabled asm code should yield as soon as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	 * it is able. This is the case if we are currently running in task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	 * flag is set and re-enabling preemption a single time would result in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	 * stored negated in the top word of the thread_info::preempt_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	 * field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	get_current_task \tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	 * If we are serving a softirq, there is no point in yielding: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	 * softirq will not be preempted no matter what we do, so we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	 * run to completion as quickly as we can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) #ifdef CONFIG_PREEMPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	cbz		\tmp, \lbl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	this_cpu_offset	\tmp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	ldr		w\tmp, [\tmp, \tmp2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) .Lnoyield_\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)  * This macro emits a program property note section identifying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)  * architecture features which require special handling, mainly for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)  * use in assembly files included in the VDSO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #define NT_GNU_PROPERTY_TYPE_0  5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) #define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) #define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) #define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) #ifdef CONFIG_ARM64_BTI_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) #define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) #ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) .macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	.pushsection .note.gnu.property, "a"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	.align  3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	.long   2f - 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	.long   6f - 3f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	.long   NT_GNU_PROPERTY_TYPE_0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 1:      .string "GNU"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	.align  3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	.long   5f - 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	 * This is described with an array of char in the Linux API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	 * spec but the text and all other usage (including binutils,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	 * clang and GCC) treat this as a 32 bit value so no swizzling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	 * is required for big endian.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	.long   \feat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	.align  3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	.popsection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) .macro emit_aarch64_feature_1_and, feat=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) .endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	.macro __mitigate_spectre_bhb_loop      tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) alternative_cb  spectre_bhb_patch_loop_iter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	mov	\tmp, #32		// Patched to correct the immediate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) alternative_cb_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) .Lspectre_bhb_loop\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	b	. + 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	subs	\tmp, \tmp, #1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	b.ne	.Lspectre_bhb_loop\@
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 	sb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	.macro mitigate_spectre_bhb_loop	tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) alternative_cb	spectre_bhb_patch_loop_mitigation_enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) alternative_cb_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	__mitigate_spectre_bhb_loop	\tmp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) .L_spectre_bhb_loop_done\@:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	/* Save/restores x0-x3 to the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	.macro __mitigate_spectre_bhb_fw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	stp	x0, x1, [sp, #-16]!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	stp	x2, x3, [sp, #-16]!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) alternative_cb	smccc_patch_fw_mitigation_conduit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	nop					// Patched to SMC/HVC #0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) alternative_cb_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	ldp	x2, x3, [sp], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	ldp	x0, x1, [sp], #16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	.macro mitigate_spectre_bhb_clear_insn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) alternative_cb	spectre_bhb_patch_clearbhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	/* Patched to NOP when not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	clearbhb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	isb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) alternative_cb_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	.endm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #endif	/* __ASM_ASSEMBLER_H */