Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

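The listing below is this tree's copy of the x86 <asm/nospec-branch.h> header — the retpoline, IBRS/IBPB and MDS (VERW) mitigation helpers for Spectre v2 and related CPU vulnerabilities — carried unchanged from commit 8f3ce5b39.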
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;
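/*
 * Note on the loop above: each iteration makes two calls, so it pushes two
 * return addresses and plants two RSB entries that point at the pause/lfence
 * speculation traps.  The "add $(BITS_PER_LONG/8) * 2, sp" then drops those
 * two return addresses from the software stack (rather than executing 'ret',
 * which would consume the RSB entries again), so nr/2 iterations leave nr
 * freshly stuffed RSB slots behind.
 */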

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	call	*%\reg
#endif
.endm
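/*
 * Illustrative use from .S code (a sketch, not part of the upstream header):
 * an indirect call such as
 *
 *	call	*%rdi
 *
 * is written instead as
 *
 *	CALL_NOSPEC rdi
 *
 * so retpoline-enabled builds can patch in the thunk (or the lfence variant)
 * while other builds keep the plain indirect call.  JMP_NOSPEC is used the
 * same way for indirect jumps.
 */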

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
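/*
 * Illustrative use (a sketch of how entry code typically invokes this; the
 * exact scratch register and feature flag depend on the call site):
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * which overwrites all RSB_CLEAR_LOOPS entries, e.g. on context switch, when
 * the named feature bit is set.
 */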

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_retpoline_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"    	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
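/*
 * Illustrative use from C (a minimal sketch; "fn" and "ret" are hypothetical
 * locals, and the constraint list is whatever the call site actually needs):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * THUNK_TARGET() supplies the [thunk_target] operand that every CALL_NOSPEC
 * variant above expects, with the constraint appropriate for the build.
 */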

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
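/*
 * Illustrative caller (a sketch; the real call sites are in the mm-switch and
 * VM-entry paths): the conditional-IBPB static keys declared below gate calls
 * of the form
 *
 *	if (static_branch_unlikely(&switch_mm_cond_ibpb))
 *		indirect_branch_prediction_barrier();
 *
 * when switching to a task that has requested indirect branch isolation.
 */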

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
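/*
 * Illustrative use (a sketch; firmware_call_fn is a hypothetical stand-in for
 * whatever EFI/firmware call the site wraps):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = firmware_call_fn(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * IBRS stays set, and preemption stays disabled, for the duration of the
 * firmware call.
 */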

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
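/*
 * Illustrative callers (a sketch; exact call sites vary): the _user_ variant
 * is invoked on the return-to-userspace path, e.g.
 *
 *	mds_user_clear_cpu_buffers();
 *
 * just before switching to user mode, while the _idle_ variant is called from
 * the mwait/halt idle-entry paths so stale buffer contents are not exposed to
 * a sibling hyperthread while this CPU idles.
 */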

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rcx,(%rsp) for x86_64
 *    mov %edx,(%esp) for x86_32
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rcx for x86_64
 *    jmp *%edx for x86_32
 */
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	17
#  define RETPOLINE_RCX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */	\
	EMIT1(0xC3);             /* retq */			\
} while (0)
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
do {								\
	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
	EMIT1(0xC3);             /* ret */			\
} while (0)
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
#  define RETPOLINE_RCX_BPF_JIT_SIZE	2
#  define RETPOLINE_RCX_BPF_JIT()				\
	EMIT2(0xFF, 0xE1);       /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
#  define RETPOLINE_EDX_BPF_JIT()				\
	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
# endif
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */