/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:
 */

static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

/**
 * cpu_vmxoff() - Disable VMX on the current CPU
 *
 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
 *
 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
 * atomically track post-VMXON state, e.g. this may be called in NMI context.
 * Eat all faults, as all other VMXOFF faults are mode related, i.e. faults
 * are guaranteed to be due to the !post-VMXON check unless the CPU is
 * magically in RM, VM86, compat mode, or at CPL>0.
 */
static inline void cpu_vmxoff(void)
{
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault]) :::: fault);
fault:
	cr4_clear_bits(X86_CR4_VMXE);
}

static inline int cpu_vmx_enabled(void)
{
	return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx())
		__cpu_emergency_vmxoff();
}
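
/*
 * Usage sketch (illustrative, not part of the original header): emergency
 * paths such as crash or reboot handlers can call cpu_emergency_vmxoff()
 * unconditionally on each CPU, as it degrades to a no-op when VMX is
 * unsupported or not enabled (cpu_has_vmx() and cpu_vmx_enabled() are
 * checked internally).  The caller name below is hypothetical:
 *
 *	static void emergency_disable_vmx_this_cpu(void)
 *	{
 *		cpu_emergency_vmxoff();
 *	}
 */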

/*
 * SVM functions:
 */

/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in this case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
		if (msg)
			*msg = "can't execute cpuid_8000000a";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}
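
/*
 * Usage sketch (illustrative, not part of the original header): the 'msg'
 * argument lets a caller report why SVM cannot be used.  The snippet below
 * is hypothetical and assumes pr_info() from <linux/printk.h>:
 *
 *	const char *msg;
 *
 *	if (!cpu_has_svm(&msg))
 *		pr_info("SVM unavailable: %s\n", msg);
 */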

/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}

/** Makes sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}
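
/*
 * Usage sketch (illustrative, not part of the original header): a crash or
 * emergency-reboot path would typically combine the VMX and SVM helpers,
 * since both are safe to call on CPUs lacking the respective feature.  The
 * function name is hypothetical:
 *
 *	static void emergency_disable_virtualization(void)
 *	{
 *		cpu_emergency_vmxoff();
 *		cpu_emergency_svm_disable();
 *	}
 */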

#endif /* _ASM_X86_VIRTEX_H */