/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
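
/*
 * Illustrative example (assumes the usual __VCPU_REGS_* numbering from
 * asm/kvm_vcpu_regs.h, e.g. __VCPU_REGS_RCX == 1): on a 64-bit kernel,
 * WORD_SIZE is 8, so VCPU_RCX evaluates to byte offset 8 into the @regs
 * array passed to __vmx_vcpu_run() below, i.e. regs[__VCPU_REGS_RCX].
 */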

.section .noinstr.text, "ax"

/**
 * vmx_vmenter - VM-Enter the current loaded VMCS
 *
 * %RFLAGS.ZF:	!VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if they
 * VM-Fail, whereas a successful VM-Enter + VM-Exit will jump to vmx_vmexit.
 */
SYM_FUNC_START(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f

1:	vmresume
	ret

2:	vmlaunch
	ret

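	/*
	 * Fixup target: if VMLAUNCH/VMRESUME faults (e.g. because VMX has
	 * already been disabled during an emergency reboot), the exception
	 * table entries below redirect here.  Eat the fault if KVM is
	 * rebooting, otherwise UD2 to yield a diagnosable BUG.
	 */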
3:	cmpb $0, kvm_rebooting
	je 4f
	ret
4:	ud2

	_ASM_EXTABLE(1b, 3b)
	_ASM_EXTABLE(2b, 3b)

SYM_FUNC_END(vmx_vmenter)
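
/*
 * Caller-side sketch (illustrative only, mirroring __vmx_vcpu_run() below):
 * load ZF with !launched before the CALL and test CF/ZF afterward, as
 * vmx_vmenter() returns directly only on VM-Fail:
 *
 *	cmpb $0, %bl		// ZF = !launched
 *	call vmx_vmenter
 *	jbe  .Lvmfail		// CF or ZF set, i.e. VM-Fail
 */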

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
SYM_FUNC_START(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
	/* Preserve guest's RAX, it's used to stuff the RSB. */
	push %_ASM_AX

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

	/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
	or $1, %_ASM_AX
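	/*
	 * OR clears CF unconditionally and, because the result is guaranteed
	 * to be non-zero, ZF ends up cleared as well; RAX holds only a
	 * scratch value here and the guest's RAX is restored from the stack
	 * below.
	 */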

	pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
	ret
SYM_FUNC_END(vmx_vmexit)
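
/*
 * Note: hardware transfers control to vmx_vmexit because VMCS.HOST_RIP is
 * expected to hold its address; programming that field is assumed to happen
 * in C code (e.g. something along the lines of
 * vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit)), outside this file.
 */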

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
 * @regs:	unsigned long * (to guest registers)
 * @launched:	%true if the VMCS has been launched
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @launched to BL; _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp
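	/*
	 * The RSP value passed above is what RSP will hold once the upcoming
	 * CALL to vmx_vmenter() has pushed its return address, i.e. the stack
	 * pointer the host should resume with on VM-Exit.
	 * vmx_update_host_rsp() is assumed to record it in VMCS.HOST_RSP; it
	 * lives in C code outside this file.
	 */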

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/* Check if vmlaunch or vmresume is needed */
	cmpb $0, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	call vmx_vmenter

	/* Jump on VM-Fail. */
	jbe 2f
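	/*
	 * (The JBE above is taken iff CF or ZF is set, so it catches both
	 * VM-Fail Invalid and VM-Fail Valid per vmx_vmenter()'s contract.)
	 */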

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	pop VCPU_RAX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %eax, %eax

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
	 */
1:	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
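	/*
	 * Note: on x86-64, writing a 32-bit register zero-extends into the
	 * full 64-bit register, so the 32-bit XORs above clear the entire
	 * registers (including R8-R15) with shorter encodings.
	 */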

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
	jmp 1b
SYM_FUNC_END(__vmx_vcpu_run)
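
/*
 * C-level usage sketch (illustrative only; the real caller lives in vmx.c and
 * the struct/field names below are assumptions):
 *
 *	bool fail;
 *
 *	fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
 *			      vmx->loaded_vmcs->launched);
 *
 * A zero return indicates a clean VM-Exit with the guest's GPRs written back
 * into @regs; a non-zero return indicates VM-Fail.
 */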


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
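
	/*
	 * Stack layout relative to the frame pointer set up above, as assumed
	 * by the 2*WORD_SIZE/3*WORD_SIZE displacements below:
	 *
	 *	3*WORD_SIZE(%_ASM_BP): @fault
	 *	2*WORD_SIZE(%_ASM_BP): @field
	 *	1*WORD_SIZE(%_ASM_BP): return address
	 *	0*WORD_SIZE(%_ASM_BP): saved RBP
	 */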

	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif
#ifdef CONFIG_X86_64
	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
	push 3*WORD_SIZE(%ebp)
	push 2*WORD_SIZE(%ebp)
#endif

	call vmread_error

#ifndef CONFIG_X86_64
	add $8, %esp
#endif

	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX
	pop %_ASM_BP

	ret
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
	/*
	 * Unconditionally create a stack frame; getting the correct RSP on
	 * the stack (for x86-64) would take two instructions anyway, and RBP
	 * can be used to restore RSP to make objtool happy (see below).
	 */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
	/*
	 * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
	 * creating the synthetic interrupt stack frame for the IRQ/NMI.
	 */
	and  $-16, %rsp
	push $__KERNEL_DS
	push %rbp
#endif
	pushf
	push $__KERNEL_CS
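	/*
	 * The indirect CALL below pushes the return RIP, completing a
	 * synthetic exception frame for the handler to IRET from:
	 *
	 *	SS     = __KERNEL_DS	(64-bit only)
	 *	RSP    = RBP, i.e. the frame base set up above (64-bit only)
	 *	RFLAGS
	 *	CS     = __KERNEL_CS
	 *	RIP    = return address of the CALL
	 */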
	CALL_NOSPEC _ASM_ARG1

	/*
	 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
	 * the correct value.  objtool doesn't know the callee will IRET and,
	 * without the explicit restore, thinks the stack is getting walloped.
	 * Using an unwind hint is problematic due to x86-64's dynamic
	 * alignment.
	 */
	mov %_ASM_BP, %_ASM_SP
	pop %_ASM_BP
	ret
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)