Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5/5B/5 Plus boards. The file shown below is the x86 KVM SVM world-switch assembly (arch/x86/kvm/svm/vmenter.S in the upstream tree).

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)
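
/*
 * Each VCPU_<reg> macro below is the byte offset of that register in the
 * @regs array handed to __svm_vcpu_run: its __VCPU_REGS_* index times
 * WORD_SIZE.  E.g. on 64-bit, __VCPU_REGS_RCX == 1 and WORD_SIZE == 8,
 * so VCPU_RCX addresses regs[1].
 */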

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"
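/* noinstr: no tracing or instrumentation; this code runs around VM-Enter/VM-Exit. */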

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the guest VMCB)
 * @regs:	unsigned long * (pointer to the guest register array)
 */
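/*
 * For reference, the matching C declaration (assumed from the SVM code in
 * this tree):
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 */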
SYM_FUNC_START(__svm_vcpu_run)
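	/* Save the callee-saved registers the guest run below will clobber. */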
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1
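	/*
	 * The stack now holds @regs with @vmcb on top: @vmcb is popped just
	 * before VMLOAD, @regs only after VM-Exit when guest state is saved.
	 */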

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX
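	/* VMLOAD, VMRUN and VMSAVE all take the VMCB physical address in rAX. */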

	/* Enter guest mode */
	sti
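	/*
	 * Each VM instruction below is covered by an exception-table entry:
	 * a fault at label N resumes at its fixup label, which raises #UD
	 * (a BUG) via UD2 unless KVM is already rebooting and the fault is
	 * therefore expected.
	 */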
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
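	/* IRQs were enabled via STI above; mask them again before touching host state. */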
	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
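	/*
	 * FILL_RETURN_BUFFER overwrites every RSB entry with a benign target
	 * (using rAX as scratch, which is reloaded below), so guest-poisoned
	 * entries cannot steer speculative returns.
	 */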
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
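	/* 32-bit XORs zero-extend, clearing the full 64-bit registers. */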
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

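	/* Restore callee-saved registers in reverse order of the prologue. */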
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)