#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/mce.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
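
/*
 * MSR_AMD64_TSC_RATIO is a fixed-point value: an 8-bit integer part in
 * bits 39:32 and a 32-bit fractional part in bits 31:0 (bits 63:40 are
 * reserved), so a ratio of 0x0100000000 means the guest TSC runs at the
 * host rate.
 */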
#define TSC_RATIO_RSVD	0xffffff0000000000ULL
#define TSC_RATIO_MIN	0x0000000000000001ULL
#define TSC_RATIO_MAX	0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
/*
 * Set osvw_len to a higher value when updated Revision Guides are
 * published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added to the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from the VMCB into an internal counter.
 *	Then, on each pause instruction the hardware checks the elapsed number
 *	of cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the pause
 *	filter threshold, then the internal pause count is reloaded from the
 *	VMCB and execution continues. If the elapsed cycle count is less than
 *	the pause filter threshold, then the internal pause count is
 *	decremented. If the count value is less than zero and PAUSE intercept
 *	is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *	supported and the pause filter threshold field is set to zero, the
 *	filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
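
/*
 * The MSR permission map (MSRPM) uses two bits per MSR: an even bit that
 * intercepts reads and an odd bit that intercepts writes.  Each 2K-byte
 * range of the map covers 8192 MSRs (MSRS_IN_RANGE); the three ranges
 * above correspond to MSRs 0x0-0x1fff, 0xc0000000-0xc0001fff and
 * 0xc0010000-0xc0011fff.  svm_msrpm_offset() returns the index of the u32
 * word that holds the two bits for a given MSR.  For example, MSR_STAR
 * (0xc0000081) lies in the second range: byte offset 0x81 / 4 + 2048 =
 * 2080, i.e. u32 offset 520.
 */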
u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 MSRs per byte (2 bits each) */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the byte offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15
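
/*
 * Thin wrappers around the bare SVM instructions: CLGI/STGI clear and set
 * the global interrupt flag around VMRUN, and INVLPGA invalidates the TLB
 * entry for a single guest virtual address in the given ASID.
 */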
static inline void clgi(void)
{
	asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
	asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(vcpu);
			svm_set_gif(svm, true);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(&svm->vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
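
/*
 * The VMCB exposes only a single interrupt-shadow bit, so when it is set
 * KVM cannot tell whether the shadow came from STI or from a MOV/POP SS
 * and conservatively reports both.
 */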
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;

	kvm_deliver_exception_payload(&svm->vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}
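
/*
 * Work around AMD erratum 383 (flagged via X86_BUG_AMD_TLB_MMATCH), which
 * can cause a spurious machine check in rare circumstances; see the
 * relevant AMD revision guide for the exact conditions.  The mitigation
 * sets bit 47 of MSR_AMD64_DC_CFG, using the _safe MSR accessors so that
 * running nested (where the MSR may not be emulated) does not fault.
 */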
static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing the VCPU's osvw.length to 3 we are telling the guest
	 * that all osvw.status bits inside that length, including bit 0
	 * (which is reserved for erratum 298), are valid. However, if the
	 * host processor's osvw_len is 0 then osvw_status[0] carries no
	 * information. We need to be conservative here and therefore we tell
	 * the guest that erratum 298 is present (because we really don't
	 * know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	if (sev_active()) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}
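
/*
 * Per-CPU enablement of SVM: verify the CPU is usable, set EFER.SVME,
 * point MSR_VM_HSAVE_PA at this CPU's host save area, reset the TSC ratio
 * to the 1:1 default, and fold this CPU's OSVW (OS Visible Workaround)
 * length/status into the global worst-case values reported to guests.
 */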
static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	if (!sd)
		return;

	per_cpu(svm_data, cpu) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto free_cpu_data;

	if (svm_sev_enabled()) {
		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
					      sizeof(void *),
					      GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto free_save_area;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return -ENOMEM;
}

static int direct_access_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == msr)
			return i;

	return -ENOENT;
}
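
/*
 * The shadow read/write bitmaps record whether KVM itself wants reads and
 * writes of each direct-access MSR passed through to the guest.  When
 * userspace changes its MSR filter, svm_msr_filter_changed() replays this
 * desired state through set_msr_interception_bitmap() so that the real
 * MSRPM reflects both KVM's choice and the filter.
 */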
static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
				     int write)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int slot = direct_access_msr_slot(msr);

	if (slot == -ENOENT)
		return;

	/* Set the shadow bitmaps to the desired intercept states */
	if (read)
		set_bit(slot, svm->shadow_msr_intercept.read);
	else
		clear_bit(slot, svm->shadow_msr_intercept.read);

	if (write)
		set_bit(slot, svm->shadow_msr_intercept.write);
	else
		clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
	return direct_access_msr_slot(index) != -ENOENT;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write, &tmp);
}
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
					u32 msr, int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	/* Enforce interception of MSRs that the MSR filter does not allow */
	if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
		read = 0;

	if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
		write = 0;

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
				 int read, int write)
{
	set_shadow_msr_intercept(vcpu, msr, read, write);
	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}
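
/*
 * Allocate the two-page (8K) MSR permission map with every MSR marked as
 * intercepted (all bits set); pass-through is then granted selectively by
 * svm_vcpu_init_msrpm() below.
 */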
u32 *svm_vcpu_alloc_msrpm(void)
{
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	u32 *msrpm;

	if (!pages)
		return NULL;

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;
		set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

void svm_vcpu_free_msrpm(u32 *msrpm)
{
	__free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all direct access MSRs again. They
	 * will automatically get filtered through the MSR filter, so we are
	 * back in sync after this.
	 */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 msr = direct_access_msrs[i].index;
		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
		u32 write = test_bit(i, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
	}
}
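
/*
 * msrpm_offsets[] collects the u32 offsets of every MSRPM word that KVM
 * cares about for its direct-access MSRs.  Keeping this list small lets
 * the nested SVM code merge only these words of L1's MSR bitmap instead
 * of scanning the whole 8K map.
 */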
static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}
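
/*
 * With LBR virtualization enabled, the CPU saves and restores the LBR MSRs
 * across VMRUN/#VMEXIT, so the guest can safely be given unintercepted
 * access to the last-branch/last-interrupt MSRs below; disabling it
 * re-installs the intercepts.
 */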
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}
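
/*
 * Dynamic PLE window: the per-VMCB pause filter count is grown and shrunk
 * at run time between pause_filter_count and pause_filter_count_max using
 * the grow/shrink module parameters above; every change is reported via
 * the kvm_ple_window_update tracepoint.
 */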
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static void grow_ple_window(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct vmcb_control_area *control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) int old = control->pause_filter_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) control->pause_filter_count = __grow_ple_window(old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pause_filter_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pause_filter_count_grow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) pause_filter_count_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (control->pause_filter_count != old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) trace_kvm_ple_window_update(vcpu->vcpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) control->pause_filter_count, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void shrink_ple_window(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct vmcb_control_area *control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int old = control->pause_filter_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) control->pause_filter_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) __shrink_ple_window(old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) pause_filter_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) pause_filter_count_shrink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) pause_filter_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (control->pause_filter_count != old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) trace_kvm_ple_window_update(vcpu->vcpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) control->pause_filter_count, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
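/*
 * Rough sketch of the resulting PLE behaviour, assuming the usual default
 * module parameters (pause_filter_count 3000, grow modifier 2, shrink
 * modifier 0, maximum USHRT_MAX), which are declared earlier in this file
 * and may be overridden by the administrator:
 *
 *	grow:   3000 -> 6000 -> 12000 -> ... capped at pause_filter_count_max
 *	shrink: a shrink modifier of 0 makes __shrink_ple_window() fall back
 *	        to the base pause_filter_count
 */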
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * The default MMIO mask is a single bit (excluding the present bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * which could conflict with the memory encryption bit. Check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * memory encryption support and override the default MMIO mask if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * memory encryption is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static __init void svm_adjust_mmio_mask(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unsigned int enc_bit, mask_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u64 msr, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* If there is no memory encryption support, use existing mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (cpuid_eax(0x80000000) < 0x8000001f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* If memory encryption is not enabled, use existing mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) rdmsrl(MSR_K8_SYSCFG, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) mask_bit = boot_cpu_data.x86_phys_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Increment the mask bit if it is the same as the encryption bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (enc_bit == mask_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) mask_bit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * If the mask bit location is below 52, then some bits above the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * physical addressing limit will always be reserved, so use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * rsvd_bits() function to generate the mask. This mask, along with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * the present bit, will be used to generate a page fault with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * PFER.RSV = 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * If the mask bit location is 52 (or above), then clear the mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
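/*
 * Worked example with hypothetical values (not read from any particular
 * part): if the C-bit is reported at position 47 and the CPU has 48
 * physical address bits, enc_bit != mask_bit, so mask_bit stays at 48 and
 *
 *	mask = rsvd_bits(48, 51) | PT_PRESENT_MASK;
 *
 * MMIO SPTEs built with this mask set bits the CPU treats as reserved, so
 * a guest access faults with PFERR.RSVD = 1 and is recognised as MMIO
 * instead of colliding with the memory encryption bit.
 */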
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static void svm_hardware_teardown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (svm_sev_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) sev_hardware_teardown();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) svm_cpu_uninit(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) iopm_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static __init void svm_set_cpu_caps(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) kvm_set_cpu_caps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) supported_xss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* CPUID 0x80000001 and 0x8000000A (SVM features) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (nested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) kvm_cpu_cap_set(X86_FEATURE_SVM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (nrips)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) kvm_cpu_cap_set(X86_FEATURE_NRIPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (npt_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) kvm_cpu_cap_set(X86_FEATURE_NPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* CPUID 0x80000008 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) boot_cpu_has(X86_FEATURE_AMD_SSBD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Enable INVPCID feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) kvm_cpu_cap_check_and_set(X86_FEATURE_INVPCID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static __init int svm_hardware_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct page *iopm_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) void *iopm_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (!iopm_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) iopm_va = page_address(iopm_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) init_msrpm_offsets();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (boot_cpu_has(X86_FEATURE_NX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) kvm_enable_efer_bits(EFER_NX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) kvm_enable_efer_bits(EFER_FFXSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) kvm_has_tsc_control = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) kvm_tsc_scaling_ratio_frac_bits = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* Check for pause filtering support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) pause_filter_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pause_filter_thresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) pause_filter_thresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (nested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (sev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (boot_cpu_has(X86_FEATURE_SEV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) r = sev_hardware_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) sev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) sev = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) svm_adjust_mmio_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) r = svm_cpu_init(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!boot_cpu_has(X86_FEATURE_NPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) npt_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (npt_enabled && !npt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) npt_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (nrips) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!boot_cpu_has(X86_FEATURE_NRIPS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) nrips = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (avic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (!npt_enabled ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) !boot_cpu_has(X86_FEATURE_AVIC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) avic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) pr_info("AVIC enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (vls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!npt_enabled ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) !IS_ENABLED(CONFIG_X86_64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) vls = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) pr_info("Virtual VMLOAD VMSAVE supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (vgif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (!boot_cpu_has(X86_FEATURE_VGIF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) vgif = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) pr_info("Virtual GIF supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) svm_set_cpu_caps();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 * It seems that on AMD processors the PTE's accessed bit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * being set by the CPU hardware before the NPF vmexit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * This is not expected behaviour and our tests fail because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * A workaround here is to disable support for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * In this case userspace can check for support using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * If future AMD CPU models change the behaviour described above,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 * this variable can be changed accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) allow_smaller_maxphyaddr = !npt_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) svm_hardware_teardown();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static void init_seg(struct vmcb_seg *seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) seg->selector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) seg->limit = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) seg->base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) seg->selector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) seg->attrib = SVM_SELECTOR_P_MASK | type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) seg->limit = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) seg->base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) u64 g_tsc_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (is_guest_mode(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* Write L1's TSC offset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) g_tsc_offset = svm->vmcb->control.tsc_offset -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) svm->nested.hsave->control.tsc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) svm->nested.hsave->control.tsc_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) trace_kvm_write_tsc_offset(vcpu->vcpu_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) svm->vmcb->control.tsc_offset - g_tsc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return svm->vmcb->control.tsc_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
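/*
 * Sketch of the arithmetic above, with illustrative numbers: while L2 is
 * running, vmcb->control.tsc_offset holds L1's offset plus the extra
 * offset L1 programmed for its nested guest, so the L1->L2 delta
 * (g_tsc_offset) is preserved across a change of L1's offset:
 *
 *	old L1 offset 100, L1->L2 delta 40  =>  vmcb offset 140
 *	new L1 offset 250, same delta       =>  vmcb offset 290
 */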
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static void svm_check_invpcid(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * Intercept INVPCID if shadow paging is enabled to sync/free shadow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * roots, or if INVPCID is disabled in the guest to inject #UD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!npt_enabled ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) svm_set_intercept(svm, INTERCEPT_INVPCID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) svm_clr_intercept(svm, INTERCEPT_INVPCID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void init_vmcb(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct vmcb_control_area *control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) struct vmcb_save_area *save = &svm->vmcb->save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) svm->vcpu.arch.hflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) svm_set_intercept(svm, INTERCEPT_CR0_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) svm_set_intercept(svm, INTERCEPT_CR3_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) svm_set_intercept(svm, INTERCEPT_CR4_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!kvm_vcpu_apicv_active(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) set_dr_intercepts(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) set_exception_intercept(svm, PF_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) set_exception_intercept(svm, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) set_exception_intercept(svm, MC_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) set_exception_intercept(svm, AC_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) set_exception_intercept(svm, DB_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * Guest access to VMware backdoor ports could legitimately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * trigger #GP because of the TSS I/O permission bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * We intercept those #GP and allow access to them anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * as VMware does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (enable_vmware_backdoor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) set_exception_intercept(svm, GP_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) svm_set_intercept(svm, INTERCEPT_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) svm_set_intercept(svm, INTERCEPT_NMI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) svm_set_intercept(svm, INTERCEPT_SMI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) svm_set_intercept(svm, INTERCEPT_RDPMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) svm_set_intercept(svm, INTERCEPT_CPUID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) svm_set_intercept(svm, INTERCEPT_INVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) svm_set_intercept(svm, INTERCEPT_INVLPG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) svm_set_intercept(svm, INTERCEPT_INVLPGA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) svm_set_intercept(svm, INTERCEPT_MSR_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) svm_set_intercept(svm, INTERCEPT_VMRUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) svm_set_intercept(svm, INTERCEPT_VMMCALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) svm_set_intercept(svm, INTERCEPT_VMLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) svm_set_intercept(svm, INTERCEPT_VMSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) svm_set_intercept(svm, INTERCEPT_STGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) svm_set_intercept(svm, INTERCEPT_CLGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) svm_set_intercept(svm, INTERCEPT_SKINIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) svm_set_intercept(svm, INTERCEPT_WBINVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) svm_set_intercept(svm, INTERCEPT_XSETBV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) svm_set_intercept(svm, INTERCEPT_RDPRU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) svm_set_intercept(svm, INTERCEPT_RSM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) svm_set_intercept(svm, INTERCEPT_MONITOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) svm_set_intercept(svm, INTERCEPT_MWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!kvm_hlt_in_guest(svm->vcpu.kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) svm_set_intercept(svm, INTERCEPT_HLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) control->iopm_base_pa = __sme_set(iopm_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) control->int_ctl = V_INTR_MASKING_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) init_seg(&save->es);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) init_seg(&save->ss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) init_seg(&save->ds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) init_seg(&save->fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) init_seg(&save->gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) save->cs.selector = 0xf000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) save->cs.base = 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* Executable/Readable Code Segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) save->cs.limit = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) save->gdtr.limit = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) save->idtr.limit = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) svm_set_cr4(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) svm_set_efer(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) save->dr6 = 0xffff0ff0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) kvm_set_rflags(&svm->vcpu, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) save->rip = 0x0000fff0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * It also updates the guest-visible cr0 value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) kvm_mmu_reset_context(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) save->cr4 = X86_CR4_PAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* rdx = ?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (npt_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Setup VMCB for Nested Paging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) svm_clr_intercept(svm, INTERCEPT_INVLPG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) clr_exception_intercept(svm, PF_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) svm_clr_intercept(svm, INTERCEPT_CR3_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) save->g_pat = svm->vcpu.arch.pat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) save->cr3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) save->cr4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) svm->asid_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) svm->nested.vmcb12_gpa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) svm->vcpu.arch.hflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) control->pause_filter_count = pause_filter_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (pause_filter_thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) control->pause_filter_thresh = pause_filter_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) svm_set_intercept(svm, INTERCEPT_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) svm_clr_intercept(svm, INTERCEPT_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) svm_check_invpcid(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (kvm_vcpu_apicv_active(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) avic_init_vmcb(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * If hardware supports Virtual VMLOAD VMSAVE then enable it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * in VMCB and clear intercepts to avoid #VMEXIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (vls) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) svm_clr_intercept(svm, INTERCEPT_VMLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) svm_clr_intercept(svm, INTERCEPT_VMSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (vgif) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) svm_clr_intercept(svm, INTERCEPT_STGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) svm_clr_intercept(svm, INTERCEPT_CLGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (sev_guest(svm->vcpu.kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) clr_exception_intercept(svm, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) vmcb_mark_all_dirty(svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) enable_gif(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) u32 dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) u32 eax = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) svm->spec_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) svm->virt_spec_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (!init_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) MSR_IA32_APICBASE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) init_vmcb(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) kvm_rdx_write(vcpu, eax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (kvm_vcpu_apicv_active(vcpu) && !init_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static int svm_create_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) struct vcpu_svm *svm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) struct page *vmcb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (!vmcb_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) err = avic_init_vcpu(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) goto error_free_vmcb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* We initialize this flag to true to make sure that the is_running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	 * bit is set the first time the vcpu is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) svm->avic_is_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) svm->msrpm = svm_vcpu_alloc_msrpm();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (!svm->msrpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto error_free_vmcb_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) svm_vcpu_init_msrpm(vcpu, svm->msrpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) svm->vmcb = page_address(vmcb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) svm->asid_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) init_vmcb(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) svm_init_osvw(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) vcpu->arch.microcode_version = 0x01000065;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) error_free_vmcb_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) __free_page(vmcb_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static void svm_clear_current_vmcb(struct vmcb *vmcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) for_each_online_cpu(i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static void svm_free_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * The vmcb page can be recycled, causing a false negative in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * svm_vcpu_load(). So, ensure that no logical CPU has this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * vmcb page recorded as its current vmcb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) svm_clear_current_vmcb(svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) svm_free_nested(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (unlikely(cpu != vcpu->cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) svm->asid_generation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) vmcb_mark_all_dirty(svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) savesegment(fs, svm->host.fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) savesegment(gs, svm->host.gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) svm->host.ldt = kvm_read_ldt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) __this_cpu_write(current_tsc_ratio, tsc_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* This assumes that the kernel never uses MSR_TSC_AUX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (static_cpu_has(X86_FEATURE_RDTSCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (sd->current_vmcb != svm->vmcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) sd->current_vmcb = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) indirect_branch_prediction_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) avic_vcpu_load(vcpu, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static void svm_vcpu_put(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) avic_vcpu_put(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ++vcpu->stat.host_state_reload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) kvm_load_ldt(svm->host.ldt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) loadsegment(fs, svm->host.fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) load_gs_index(svm->host.gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) #ifdef CONFIG_X86_32_LAZY_GS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) loadsegment(gs, svm->host.gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) unsigned long rflags = svm->vmcb->save.rflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (svm->nmi_singlestep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /* Hide our flags if they were not set by the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) rflags &= ~X86_EFLAGS_TF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) rflags &= ~X86_EFLAGS_RF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return rflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (to_svm(vcpu)->nmi_singlestep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * Any change of EFLAGS.VM is accompanied by a reload of SS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * (caused by either a task switch or an inter-privilege IRET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * so we do not need to update the CPL here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) to_svm(vcpu)->vmcb->save.rflags = rflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) case VCPU_EXREG_PDPTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) BUG_ON(!npt_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void svm_set_vintr(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct vmcb_control_area *control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* The following fields are ignored when AVIC is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) svm_set_intercept(svm, INTERCEPT_VINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	 * This is just a dummy VINTR whose only purpose is to trigger a vmexit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * Actual injection of virtual interrupts happens through EVENTINJ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) control->int_vector = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) control->int_ctl &= ~V_INTR_PRIO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) control->int_ctl |= V_IRQ_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static void svm_clear_vintr(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) svm_clr_intercept(svm, INTERCEPT_VINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* Drop int_ctl fields related to VINTR injection. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (is_guest_mode(&svm->vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) svm->nested.hsave->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) (svm->nested.ctl.int_ctl & V_TPR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) V_IRQ_INJECTION_BITS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) switch (seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) case VCPU_SREG_CS: return &save->cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) case VCPU_SREG_DS: return &save->ds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) case VCPU_SREG_ES: return &save->es;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) case VCPU_SREG_FS: return &save->fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) case VCPU_SREG_GS: return &save->gs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) case VCPU_SREG_SS: return &save->ss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) case VCPU_SREG_TR: return &save->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) case VCPU_SREG_LDTR: return &save->ldtr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct vmcb_seg *s = svm_seg(vcpu, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return s->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static void svm_get_segment(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct kvm_segment *var, int seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct vmcb_seg *s = svm_seg(vcpu, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) var->base = s->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) var->limit = s->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) var->selector = s->selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * AMD CPUs circa 2014 track the G bit for all segments except CS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * However, the SVM spec states that the G bit is not observed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * CPU, and some VMware virtual CPUs drop the G bit for all segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 * So let's synthesize a legal G bit for all segments; this helps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	 * when running KVM nested. It also helps cross-vendor migration, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * Intel's vmentry has a check on the 'G' bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) var->g = s->limit > 0xfffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * AMD's VMCB does not have an explicit unusable field, so emulate it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	 * for cross-vendor migration purposes by marking it "not present".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) var->unusable = !var->present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) switch (seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) case VCPU_SREG_TR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * Work around a bug where the busy flag in the tr selector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * isn't exposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) var->type |= 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) case VCPU_SREG_DS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) case VCPU_SREG_ES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) case VCPU_SREG_FS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) case VCPU_SREG_GS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * The accessed bit must always be set in the segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	 * descriptor cache: although it can be cleared in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	 * descriptor itself, the cached bit always remains at 1. Since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Intel has a check on this, set it here to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * cross-vendor migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (!var->unusable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) var->type |= 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) case VCPU_SREG_SS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * On AMD CPUs sometimes the DB bit in the segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * descriptor is left as 1, although the whole segment has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * been made unusable. Clear it here to pass an Intel VMX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	 * entry check when cross-vendor migrating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (var->unusable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) var->db = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /* This is symmetric with svm_set_segment() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) var->dpl = to_svm(vcpu)->vmcb->save.cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
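/*
 * SVM tracks the current privilege level directly in the VMCB save area,
 * so no decoding of SS attributes is needed here.
 */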
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int svm_get_cpl(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return save->cpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dt->size = svm->vmcb->save.idtr.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dt->address = svm->vmcb->save.idtr.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) svm->vmcb->save.idtr.limit = dt->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) svm->vmcb->save.idtr.base = dt->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) vmcb_mark_dirty(svm->vmcb, VMCB_DT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dt->size = svm->vmcb->save.gdtr.limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) dt->address = svm->vmcb->save.gdtr.base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) svm->vmcb->save.gdtr.limit = dt->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) svm->vmcb->save.gdtr.base = dt->address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) vmcb_mark_dirty(svm->vmcb, VMCB_DT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
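/*
 * Keep the SVM_CR0_SELECTIVE_MASK bits of the hardware CR0 in sync with
 * the guest's value, and intercept CR0 reads/writes only while the guest
 * and hardware values still differ in the remaining bits.
 */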
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static void update_cr0_intercept(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) ulong gcr0 = svm->vcpu.arch.cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) u64 *hcr0 = &svm->vmcb->save.cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) | (gcr0 & SVM_CR0_SELECTIVE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) vmcb_mark_dirty(svm->vmcb, VMCB_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (gcr0 == *hcr0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) svm_clr_intercept(svm, INTERCEPT_CR0_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) svm_set_intercept(svm, INTERCEPT_CR0_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (vcpu->arch.efer & EFER_LME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) vcpu->arch.efer |= EFER_LMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) vcpu->arch.efer &= ~EFER_LMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) vcpu->arch.cr0 = cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (!npt_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) cr0 |= X86_CR0_PG | X86_CR0_WP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * Re-enable caching here because the QEMU BIOS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * does not do it; running with caching disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * results in a noticeable delay at reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) svm->vmcb->save.cr0 = cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) vmcb_mark_dirty(svm->vmcb, VMCB_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) update_cr0_intercept(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (cr4 & X86_CR4_VMXE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) svm_flush_tlb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) vcpu->arch.cr4 = cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (!npt_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) cr4 |= X86_CR4_PAE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) cr4 |= host_cr4_mce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) to_svm(vcpu)->vmcb->save.cr4 = cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static void svm_set_segment(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct kvm_segment *var, int seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) struct vmcb_seg *s = svm_seg(vcpu, seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) s->base = var->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) s->limit = var->limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) s->selector = var->selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * This is always accurate, except if SYSRET returned to a segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * with SS.DPL != 3. Intel does not have this quirk, and always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * would entail passing the CPL to userspace and back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (seg == VCPU_SREG_SS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /* This is symmetric with svm_get_segment() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) svm->vmcb->save.cpl = (var->dpl & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static void update_exception_bitmap(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) clr_exception_intercept(svm, BP_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) set_exception_intercept(svm, BP_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
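/*
 * Hand out the next ASID from this CPU's pool; once the pool is exhausted,
 * bump the generation, restart from min_asid and have the next VMRUN flush
 * all ASIDs from the TLB.
 */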
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (sd->next_asid > sd->max_asid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ++sd->asid_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) sd->next_asid = sd->min_asid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) svm->asid_generation = sd->asid_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) svm->vmcb->control.asid = sd->next_asid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
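/*
 * Propagate DR6 into the VMCB only when it actually changes, so the
 * VMCB_DR clean bit is not invalidated needlessly.
 */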
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct vmcb *vmcb = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (unlikely(value != vmcb->save.dr6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) vmcb->save.dr6 = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) vmcb_mark_dirty(vmcb, VMCB_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
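/*
 * Resync the debug registers after the guest has run with the DR intercepts
 * disabled: snapshot the live DR0-DR3 from hardware, pick up DR6/DR7 from
 * the VMCB, and re-arm the DR intercepts.
 */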
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) get_debugreg(vcpu->arch.db[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) get_debugreg(vcpu->arch.db[1], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) get_debugreg(vcpu->arch.db[2], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) get_debugreg(vcpu->arch.db[3], 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * because db_interception might need it. We can do it before vmentry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) vcpu->arch.dr6 = svm->vmcb->save.dr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) vcpu->arch.dr7 = svm->vmcb->save.dr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) set_dr_intercepts(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) svm->vmcb->save.dr7 = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) vmcb_mark_dirty(svm->vmcb, VMCB_DR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
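/*
 * #PF intercept: exit_info_2 holds the faulting address and exit_info_1 the
 * page-fault error code; both are forwarded to the common handler, together
 * with the decoded instruction bytes when the CPU supports decode assists.
 */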
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) static int pf_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) u64 fault_address = svm->vmcb->control.exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) u64 error_code = svm->vmcb->control.exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) svm->vmcb->control.insn_bytes : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) svm->vmcb->control.insn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
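/*
 * Nested page fault: exit_info_2 holds the faulting guest physical address
 * (with the SME C-bit stripped) and exit_info_1 the NPF error code; both
 * are forwarded to the MMU for resolution.
 */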
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static int npf_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) u64 error_code = svm->vmcb->control.exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) trace_kvm_page_fault(fault_address, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) svm->vmcb->control.insn_bytes : NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) svm->vmcb->control.insn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static int db_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct kvm_run *kvm_run = svm->vcpu.run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (!(svm->vcpu.guest_debug &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) !svm->nmi_singlestep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (svm->nmi_singlestep) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) disable_nmi_singlestep(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* Make sure we check for pending NMIs upon entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) kvm_make_request(KVM_REQ_EVENT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (svm->vcpu.guest_debug &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) kvm_run->exit_reason = KVM_EXIT_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) kvm_run->debug.arch.pc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) svm->vmcb->save.cs.base + svm->vmcb->save.rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) kvm_run->debug.arch.exception = DB_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static int bp_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct kvm_run *kvm_run = svm->vcpu.run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) kvm_run->exit_reason = KVM_EXIT_DEBUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) kvm_run->debug.arch.exception = BP_VECTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static int ud_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) return handle_ud(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static int ac_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) static int gp_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) u32 error_code = svm->vmcb->control.exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) WARN_ON_ONCE(!enable_vmware_backdoor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * VMware backdoor emulation on #GP interception only handles IN{S},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * OUT{S}, and RDPMC, none of which generate a non-zero error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (error_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
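/*
 * Detect the machine-check signature of AMD erratum 383 (MC0_STATUS equals
 * 0xb600000000010015 with the overflow bit ignored).  On a match, clear the
 * MCi_STATUS banks and MCG_STATUS and flush the TLB so the host does not
 * act on the spurious event.
 */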
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static bool is_erratum_383(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (!erratum_383_found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) /* Bit 62 may or may not be set for this mce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) value &= ~(1ULL << 62);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (value != 0xb600000000010015ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /* Clear MCi_STATUS registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) for (i = 0; i < 6; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) u32 low, high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) value &= ~(1ULL << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) low = lower_32_bits(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) high = upper_32_bits(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /* Flush tlb to evict multi-match entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) __flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * Trigger machine check on the host. We assume all the MSRs are already set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * by the CPU and that we still run on the same CPU as the MCE occurred on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * We pass a fake environment to the machine check handler because we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * the guest to be always treated like user space, no matter what context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * it used internally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static void kvm_machine_check(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) #if defined(CONFIG_X86_MCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) struct pt_regs regs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) .cs = 3, /* Fake ring 3 no matter what the guest ran on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) .flags = X86_EFLAGS_IF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) do_machine_check(&regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static void svm_handle_mce(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (is_erratum_383()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * Erratum 383 triggered. Guest state is corrupt so kill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) pr_err("KVM: Guest triggered AMD Erratum 383\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * On an #MC intercept the MCE handler is not called automatically in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * the host. So do it by hand here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) kvm_machine_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) static int mc_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static int shutdown_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) struct kvm_run *kvm_run = svm->vcpu.run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * VMCB is undefined after a SHUTDOWN intercept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * so reinitialize it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) clear_page(svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) init_vmcb(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
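/*
 * IOIO intercept: exit_info_1 packs the port, access size, direction and
 * string flag.  String instructions are punted to the emulator; everything
 * else takes the fast PIO path, with the next RIP taken from exit_info_2.
 */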
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) static int io_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) int size, in, string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) unsigned port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ++svm->vcpu.stat.io_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) string = (io_info & SVM_IOIO_STR_MASK) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) return kvm_emulate_instruction(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) port = io_info >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) svm->next_rip = svm->vmcb->control.exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return kvm_fast_pio(&svm->vcpu, size, port, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static int nmi_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static int intr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ++svm->vcpu.stat.irq_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) static int nop_on_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) static int halt_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return kvm_emulate_halt(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) static int vmmcall_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return kvm_emulate_hypercall(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
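/*
 * VMLOAD: map the VMCB page addressed by guest RAX and copy the
 * VMLOAD/VMSAVE-managed state from it into the current VMCB.
 */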
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static int vmload_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) struct vmcb *nested_vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct kvm_host_map map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (nested_svm_check_permissions(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) if (ret == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) kvm_inject_gp(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) nested_vmcb = map.hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ret = kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) kvm_vcpu_unmap(&svm->vcpu, &map, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) static int vmsave_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) struct vmcb *nested_vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) struct kvm_host_map map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (nested_svm_check_permissions(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (ret == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) kvm_inject_gp(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) nested_vmcb = map.hva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) ret = kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) kvm_vcpu_unmap(&svm->vcpu, &map, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static int vmrun_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (nested_svm_check_permissions(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return nested_svm_vmrun(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
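/*
 * Toggle the virtual GIF and keep the STGI/VINTR intercepts consistent with
 * it; enabling GIF may unblock pending SMIs, NMIs or interrupts, so
 * KVM_REQ_EVENT is raised to re-evaluate injection in that case.
 */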
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) void svm_set_gif(struct vcpu_svm *svm, bool value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) * If VGIF is enabled, the STGI intercept is only added to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) * detect the opening of the SMI/NMI window; remove it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) * Likewise, clear the VINTR intercept; we will set it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * again while processing KVM_REQ_EVENT if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (vgif_enabled(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) svm_clr_intercept(svm, INTERCEPT_STGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (svm_is_intercept(svm, INTERCEPT_VINTR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) svm_clear_vintr(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) enable_gif(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (svm->vcpu.arch.smi_pending ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) svm->vcpu.arch.nmi_pending ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) kvm_cpu_has_injectable_intr(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) disable_gif(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * After a CLGI no interrupts should come. But if vGIF is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * in use, we still rely on the VINTR intercept (rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * STGI) to detect an open interrupt window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (!vgif_enabled(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) svm_clear_vintr(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static int stgi_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (nested_svm_check_permissions(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ret = kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) svm_set_gif(svm, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static int clgi_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (nested_svm_check_permissions(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ret = kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) svm_set_gif(svm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static int invlpga_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) kvm_rax_read(&svm->vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) return kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) static int skinit_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) kvm_queue_exception(&svm->vcpu, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static int wbinvd_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return kvm_emulate_wbinvd(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static int xsetbv_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) u32 index = kvm_rcx_read(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) return kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) static int rdpru_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) kvm_queue_exception(&svm->vcpu, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
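/*
 * Task-switch intercept: exit_info_1 carries the new TSS selector,
 * exit_info_2 the reason flags (plus an error code for exception-initiated
 * switches), and exit_int_info describes the event, if any, that caused
 * the switch.  The switch itself is emulated by kvm_task_switch().
 */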
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) static int task_switch_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) u16 tss_selector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) int reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) int int_type = svm->vmcb->control.exit_int_info &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) SVM_EXITINTINFO_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) uint32_t type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) uint32_t idt_v =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) bool has_error_code = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) u32 error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) tss_selector = (u16)svm->vmcb->control.exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (svm->vmcb->control.exit_info_2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) reason = TASK_SWITCH_IRET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) else if (svm->vmcb->control.exit_info_2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) reason = TASK_SWITCH_JMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) else if (idt_v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) reason = TASK_SWITCH_GATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) reason = TASK_SWITCH_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (reason == TASK_SWITCH_GATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) case SVM_EXITINTINFO_TYPE_NMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) svm->vcpu.arch.nmi_injected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) case SVM_EXITINTINFO_TYPE_EXEPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (svm->vmcb->control.exit_info_2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) has_error_code = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) error_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) (u32)svm->vmcb->control.exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) kvm_clear_exception_queue(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) case SVM_EXITINTINFO_TYPE_INTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) kvm_clear_interrupt_queue(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (reason != TASK_SWITCH_GATE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int_type == SVM_EXITINTINFO_TYPE_SOFT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if (!skip_emulated_instruction(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) int_vec = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) has_error_code, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) static int cpuid_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return kvm_emulate_cpuid(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static int iret_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) ++svm->vcpu.stat.nmi_window_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) svm_clr_intercept(svm, INTERCEPT_IRET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) svm->vcpu.arch.hflags |= HF_IRET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) static int invd_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) /* Treat an INVD instruction as a NOP and just skip it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) static int invlpg_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) return kvm_emulate_instruction(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) static int emulate_on_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) return kvm_emulate_instruction(&svm->vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int rsm_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) static int rdpmc_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (!nrips)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return emulate_on_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) err = kvm_rdpmc(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return kvm_complete_insn_gp(&svm->vcpu, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
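/*
 * With a selective CR0 intercept requested by L1, a CR0 write by L2 that
 * changes bits outside SVM_CR0_SELECTIVE_MASK must be reflected to L1 as
 * an SVM_EXIT_CR0_SEL_WRITE exit.
 */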
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) unsigned long cr0 = svm->vcpu.arch.cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (!is_guest_mode(&svm->vcpu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) cr0 &= ~SVM_CR0_SELECTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) val &= ~SVM_CR0_SELECTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (cr0 ^ val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) #define CR_VALID (1ULL << 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
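/*
 * MOV CR intercept, used only when decode assists are available (otherwise
 * the instruction is fully emulated): exit_info_1 holds the GPR operand,
 * and the exit code encodes the CR number, with MOV-to-CR exits offset by
 * 16 from SVM_EXIT_READ_CR0.
 */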
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static int cr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) int reg, cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) return emulate_on_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) return emulate_on_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (cr >= 16) { /* mov to cr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) cr -= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) val = kvm_register_readl(&svm->vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) trace_kvm_cr_write(cr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) switch (cr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (!check_selective_cr0_intercepted(svm, val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) err = kvm_set_cr0(&svm->vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) err = kvm_set_cr3(&svm->vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) err = kvm_set_cr4(&svm->vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) err = kvm_set_cr8(&svm->vcpu, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) WARN(1, "unhandled write to CR%d", cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) kvm_queue_exception(&svm->vcpu, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) } else { /* mov from cr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) switch (cr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) val = kvm_read_cr0(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) val = svm->vcpu.arch.cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) val = kvm_read_cr3(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) val = kvm_read_cr4(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) val = kvm_get_cr8(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) WARN(1, "unhandled read from CR%d", cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) kvm_queue_exception(&svm->vcpu, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) kvm_register_writel(&svm->vcpu, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) trace_kvm_cr_read(cr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return kvm_complete_insn_gp(&svm->vcpu, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static int dr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) int reg, dr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (svm->vcpu.guest_debug == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * No more DR vmexits; force a reload of the debug registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) * and reenter on this instruction. The next vmexit will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) * retrieve the full state of the debug registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) clr_dr_intercepts(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return emulate_on_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
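	/* As with CRs, the DR write exit codes are the read codes + 16. */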
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (dr >= 16) { /* mov to DRn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if (!kvm_require_dr(&svm->vcpu, dr - 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) val = kvm_register_readl(&svm->vcpu, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) kvm_set_dr(&svm->vcpu, dr - 16, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (!kvm_require_dr(&svm->vcpu, dr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) kvm_get_dr(&svm->vcpu, dr, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) kvm_register_writel(&svm->vcpu, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) return kvm_skip_emulated_instruction(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) static int cr8_write_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) struct kvm_run *kvm_run = svm->vcpu.run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) /* instruction emulation calls kvm_set_cr8() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) r = cr_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (lapic_in_kernel(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return r;
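	/*
	 * With a userspace local APIC, a lowered TPR may unmask a pending
	 * interrupt; ask userspace to re-evaluate via KVM_EXIT_SET_TPR.
	 */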
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) kvm_run->exit_reason = KVM_EXIT_SET_TPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
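/*
 * Report the host-supported bits of a feature MSR (read via the KVM_GET_MSRS
 * system ioctl rather than through a vCPU).
 */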
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) static int svm_get_msr_feature(struct kvm_msr_entry *msr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) msr->data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) switch (msr->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) case MSR_F10H_DECFG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) case MSR_IA32_PERF_CAPABILITIES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return KVM_MSR_RET_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) switch (msr_info->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) case MSR_STAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) msr_info->data = svm->vmcb->save.star;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) case MSR_LSTAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) msr_info->data = svm->vmcb->save.lstar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) case MSR_CSTAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) msr_info->data = svm->vmcb->save.cstar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) case MSR_KERNEL_GS_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) msr_info->data = svm->vmcb->save.kernel_gs_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) case MSR_SYSCALL_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) msr_info->data = svm->vmcb->save.sfmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) case MSR_IA32_SYSENTER_CS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) msr_info->data = svm->vmcb->save.sysenter_cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) case MSR_IA32_SYSENTER_EIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) msr_info->data = svm->sysenter_eip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) case MSR_IA32_SYSENTER_ESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) msr_info->data = svm->sysenter_esp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) case MSR_TSC_AUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (!boot_cpu_has(X86_FEATURE_RDTSCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (!msr_info->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) msr_info->data = svm->tsc_aux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) * Nobody will change the following 5 values in the VMCB so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * safely return them on rdmsr. They will always be 0 until LBRV is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) case MSR_IA32_DEBUGCTLMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) msr_info->data = svm->vmcb->save.dbgctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) case MSR_IA32_LASTBRANCHFROMIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) msr_info->data = svm->vmcb->save.br_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) case MSR_IA32_LASTBRANCHTOIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) msr_info->data = svm->vmcb->save.br_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) case MSR_IA32_LASTINTFROMIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) msr_info->data = svm->vmcb->save.last_excp_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) case MSR_IA32_LASTINTTOIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) msr_info->data = svm->vmcb->save.last_excp_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) case MSR_VM_HSAVE_PA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) msr_info->data = svm->nested.hsave_msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) case MSR_VM_CR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) msr_info->data = svm->nested.vm_cr_msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) case MSR_IA32_SPEC_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (!msr_info->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) !guest_has_spec_ctrl_msr(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) msr_info->data = svm->spec_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) case MSR_AMD64_VIRT_SPEC_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (!msr_info->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) msr_info->data = svm->virt_spec_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) case MSR_F15H_IC_CFG: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
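		/*
		 * For family 0x15, models 0x2-0x1f, report IC_CFG bits 1-4 as
		 * set, presumably so the guest treats the associated erratum
		 * workaround as already applied (assumption); guests on other
		 * models simply read 0.
		 */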
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) int family, model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) family = guest_cpuid_family(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) model = guest_cpuid_model(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (family < 0 || model < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) return kvm_get_msr_common(vcpu, msr_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) msr_info->data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (family == 0x15 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) (model >= 0x2 && model < 0x20))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) msr_info->data = 0x1E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) case MSR_F10H_DECFG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) msr_info->data = svm->msr_decfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) return kvm_get_msr_common(vcpu, msr_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) static int rdmsr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return kvm_emulate_rdmsr(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) int svm_dis, chg_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) if (data & ~SVM_VM_CR_VALID_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) chg_mask = SVM_VM_CR_VALID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
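	/* Once SVMDIS has been latched, the lock and disable bits become read-only. */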
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) svm->nested.vm_cr_msr &= ~chg_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) svm->nested.vm_cr_msr |= (data & chg_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) /* check for svm_disable while efer.svme is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) if (svm_dis && (vcpu->arch.efer & EFER_SVME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) u32 ecx = msr->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) u64 data = msr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) switch (ecx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) case MSR_IA32_CR_PAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) vcpu->arch.pat = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) svm->vmcb->save.g_pat = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) case MSR_IA32_SPEC_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (!msr->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) !guest_has_spec_ctrl_msr(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (kvm_spec_ctrl_test_value(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) svm->spec_ctrl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * For non-nested:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * When it's written (to non-zero) for the first time, pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * it through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * For nested:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * The handling of the MSR bitmap for L2 guests is done in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) * nested_svm_vmrun_msrpm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * We update the L1 MSR bit as well since it will end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * touching the MSR anyway now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) case MSR_IA32_PRED_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (!msr->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) !guest_has_pred_cmd_msr(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (data & ~PRED_CMD_IBPB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (!boot_cpu_has(X86_FEATURE_IBPB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
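		/*
		 * Perform the IBPB on the guest's behalf now, then let
		 * subsequent writes of this write-only MSR go straight to
		 * hardware (reads stay intercepted).
		 */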
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) case MSR_AMD64_VIRT_SPEC_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (!msr->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (data & ~SPEC_CTRL_SSBD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) svm->virt_spec_ctrl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) case MSR_STAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) svm->vmcb->save.star = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) case MSR_LSTAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) svm->vmcb->save.lstar = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) case MSR_CSTAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) svm->vmcb->save.cstar = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) case MSR_KERNEL_GS_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) svm->vmcb->save.kernel_gs_base = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) case MSR_SYSCALL_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) svm->vmcb->save.sfmask = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) case MSR_IA32_SYSENTER_CS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) svm->vmcb->save.sysenter_cs = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) case MSR_IA32_SYSENTER_EIP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) svm->sysenter_eip = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) svm->vmcb->save.sysenter_eip = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) case MSR_IA32_SYSENTER_ESP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) svm->sysenter_esp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) svm->vmcb->save.sysenter_esp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) case MSR_TSC_AUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (!boot_cpu_has(X86_FEATURE_RDTSCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) if (!msr->host_initiated &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) * This is rare, so we update the MSR here instead of using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * direct_access_msrs. Doing that would require a rdmsr in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) * svm_vcpu_put.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) svm->tsc_aux = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) case MSR_IA32_DEBUGCTLMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (!boot_cpu_has(X86_FEATURE_LBRV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) __func__, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (data & DEBUGCTL_RESERVED_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) svm->vmcb->save.dbgctl = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
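		/* Bit 0 of DEBUGCTL is the LBR enable bit. */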
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (data & (1ULL<<0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) svm_enable_lbrv(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) svm_disable_lbrv(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) case MSR_VM_HSAVE_PA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * Old kernels did not validate the value written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) * value to allow live migrating buggy or malicious guests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * originating from those kernels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) if (!msr->host_initiated && !page_address_valid(vcpu, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) svm->nested.hsave_msr = data & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) case MSR_VM_CR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) return svm_set_vm_cr(vcpu, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) case MSR_VM_IGNNE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) case MSR_F10H_DECFG: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) struct kvm_msr_entry msr_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) msr_entry.index = msr->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) if (svm_get_msr_feature(&msr_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /* Check the supported bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (data & ~msr_entry.data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		/* The guest must not change any of these bits; such a write gets a #GP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (!msr->host_initiated && (data ^ msr_entry.data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) svm->msr_decfg = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) case MSR_IA32_APICBASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (kvm_vcpu_apicv_active(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) avic_update_vapic_bar(to_svm(vcpu), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) return kvm_set_msr_common(vcpu, msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) static int wrmsr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) return kvm_emulate_wrmsr(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) static int msr_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) {
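	/* For SVM_EXIT_MSR, EXITINFO1 is 1 for WRMSR and 0 for RDMSR. */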
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if (svm->vmcb->control.exit_info_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) return wrmsr_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return rdmsr_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static int interrupt_window_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) svm_clear_vintr(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) * For AVIC, the only reason to end up here is ExtINTs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	 * In that case AVIC was temporarily disabled in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	 * request the IRQ window, and it must be re-enabled here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) svm_toggle_avic_for_irq_window(&svm->vcpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) ++svm->vcpu.stat.irq_window_exits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static int pause_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) bool in_kernel = (svm_get_cpl(vcpu) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) if (!kvm_pause_in_guest(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) grow_ple_window(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
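	/*
	 * CPL 0 is used as a proxy for "the vCPU was spinning in the kernel",
	 * which steers the directed-yield heuristic in kvm_vcpu_on_spin().
	 */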
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) kvm_vcpu_on_spin(vcpu, in_kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static int nop_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return kvm_skip_emulated_instruction(&(svm->vcpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) static int monitor_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) return nop_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) static int mwait_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) return nop_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) static int invpcid_interception(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) struct kvm_vcpu *vcpu = &svm->vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) unsigned long type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) gva_t gva;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) kvm_queue_exception(vcpu, UD_VECTOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) * For an INVPCID intercept:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * EXITINFO1 provides the linear address of the memory operand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) * EXITINFO2 provides the contents of the register operand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) type = svm->vmcb->control.exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) gva = svm->vmcb->control.exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
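	/* Only INVPCID types 0-3 are architecturally defined; anything else gets #GP. */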
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (type > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) kvm_inject_gp(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return kvm_handle_invpcid(vcpu, type, gva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) [SVM_EXIT_READ_CR0] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) [SVM_EXIT_READ_CR3] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) [SVM_EXIT_READ_CR4] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) [SVM_EXIT_READ_CR8] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) [SVM_EXIT_WRITE_CR0] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) [SVM_EXIT_WRITE_CR3] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) [SVM_EXIT_WRITE_CR4] = cr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) [SVM_EXIT_READ_DR0] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) [SVM_EXIT_READ_DR1] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) [SVM_EXIT_READ_DR2] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) [SVM_EXIT_READ_DR3] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) [SVM_EXIT_READ_DR4] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) [SVM_EXIT_READ_DR5] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) [SVM_EXIT_READ_DR6] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) [SVM_EXIT_READ_DR7] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) [SVM_EXIT_WRITE_DR0] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) [SVM_EXIT_WRITE_DR1] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) [SVM_EXIT_WRITE_DR2] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) [SVM_EXIT_WRITE_DR3] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) [SVM_EXIT_WRITE_DR4] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) [SVM_EXIT_WRITE_DR5] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) [SVM_EXIT_WRITE_DR6] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) [SVM_EXIT_WRITE_DR7] = dr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) [SVM_EXIT_INTR] = intr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) [SVM_EXIT_NMI] = nmi_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) [SVM_EXIT_SMI] = nop_on_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) [SVM_EXIT_INIT] = nop_on_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) [SVM_EXIT_VINTR] = interrupt_window_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) [SVM_EXIT_RDPMC] = rdpmc_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) [SVM_EXIT_CPUID] = cpuid_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) [SVM_EXIT_IRET] = iret_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) [SVM_EXIT_INVD] = invd_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) [SVM_EXIT_PAUSE] = pause_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) [SVM_EXIT_HLT] = halt_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) [SVM_EXIT_INVLPG] = invlpg_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) [SVM_EXIT_INVLPGA] = invlpga_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) [SVM_EXIT_IOIO] = io_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) [SVM_EXIT_MSR] = msr_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) [SVM_EXIT_SHUTDOWN] = shutdown_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) [SVM_EXIT_VMRUN] = vmrun_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) [SVM_EXIT_VMMCALL] = vmmcall_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) [SVM_EXIT_VMLOAD] = vmload_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) [SVM_EXIT_VMSAVE] = vmsave_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) [SVM_EXIT_STGI] = stgi_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) [SVM_EXIT_CLGI] = clgi_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) [SVM_EXIT_SKINIT] = skinit_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) [SVM_EXIT_WBINVD] = wbinvd_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) [SVM_EXIT_MONITOR] = monitor_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) [SVM_EXIT_MWAIT] = mwait_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) [SVM_EXIT_XSETBV] = xsetbv_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) [SVM_EXIT_RDPRU] = rdpru_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) [SVM_EXIT_INVPCID] = invpcid_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) [SVM_EXIT_NPF] = npf_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) [SVM_EXIT_RSM] = rsm_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static void dump_vmcb(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) struct vmcb_control_area *control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) struct vmcb_save_area *save = &svm->vmcb->save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) if (!dump_invalid_vmcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) pr_err("VMCB Control Area:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) pr_err("%-20s%08x %08x\n", "intercepts:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) control->intercepts[INTERCEPT_WORD3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) control->intercepts[INTERCEPT_WORD4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) pr_err("%-20s%d\n", "pause filter threshold:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) control->pause_filter_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) pr_err("%-20s%d\n", "asid:", control->asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) pr_err("%-20s%08x\n", "int_state:", control->int_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) pr_err("VMCB State Save Area:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) "es:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) save->es.selector, save->es.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) save->es.limit, save->es.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) "cs:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) save->cs.selector, save->cs.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) save->cs.limit, save->cs.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) "ss:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) save->ss.selector, save->ss.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) save->ss.limit, save->ss.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) "ds:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) save->ds.selector, save->ds.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) save->ds.limit, save->ds.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) "fs:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) save->fs.selector, save->fs.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) save->fs.limit, save->fs.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) "gs:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) save->gs.selector, save->gs.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) save->gs.limit, save->gs.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) "gdtr:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) save->gdtr.selector, save->gdtr.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) save->gdtr.limit, save->gdtr.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) "ldtr:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) save->ldtr.selector, save->ldtr.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) save->ldtr.limit, save->ldtr.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) "idtr:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) save->idtr.selector, save->idtr.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) save->idtr.limit, save->idtr.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) "tr:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) save->tr.selector, save->tr.attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) save->tr.limit, save->tr.base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) pr_err("cpl: %d efer: %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) save->cpl, save->efer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) "cr0:", save->cr0, "cr2:", save->cr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) "cr3:", save->cr3, "cr4:", save->cr4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) "dr6:", save->dr6, "dr7:", save->dr7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) "rip:", save->rip, "rflags:", save->rflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) "rsp:", save->rsp, "rax:", save->rax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) "star:", save->star, "lstar:", save->lstar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) "cstar:", save->cstar, "sfmask:", save->sfmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) "kernel_gs_base:", save->kernel_gs_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) "sysenter_cs:", save->sysenter_cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) "sysenter_esp:", save->sysenter_esp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) "sysenter_eip:", save->sysenter_eip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) "br_from:", save->br_from, "br_to:", save->br_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) pr_err("%-15s %016llx %-13s %016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) "excp_from:", save->last_excp_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) "excp_to:", save->last_excp_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) u32 *intr_info, u32 *error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) *info1 = control->exit_info_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) *info2 = control->exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) *intr_info = control->exit_int_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if ((*intr_info & SVM_EXITINTINFO_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) (*intr_info & SVM_EXITINTINFO_VALID_ERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) *error_code = control->exit_int_info_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) *error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct kvm_run *kvm_run = vcpu->run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) u32 exit_code = svm->vmcb->control.exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
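	/*
	 * Pick up state the guest may have changed without a VM exit: CR0 when
	 * its writes aren't being intercepted, CR3 when NPT is enabled.
	 */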
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) vcpu->arch.cr0 = svm->vmcb->save.cr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (npt_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) vcpu->arch.cr3 = svm->vmcb->save.cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) if (is_guest_mode(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) int vmexit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) trace_kvm_nested_vmexit(exit_code, vcpu, KVM_ISA_SVM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) vmexit = nested_svm_exit_special(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (vmexit == NESTED_EXIT_CONTINUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) vmexit = nested_svm_exit_handled(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (vmexit == NESTED_EXIT_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
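	/*
	 * SVM_EXIT_ERR means the CPU could not enter the guest, e.g. because of
	 * inconsistent VMCB state; report the failed entry and dump the VMCB.
	 */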
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) kvm_run->fail_entry.hardware_entry_failure_reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) = svm->vmcb->control.exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) dump_vmcb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) "exit_code 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) __func__, svm->vmcb->control.exit_int_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) if (exit_fastpath != EXIT_FASTPATH_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) || !svm_exit_handlers[exit_code]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) dump_vmcb(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) vcpu->run->internal.suberror =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) vcpu->run->internal.ndata = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) vcpu->run->internal.data[0] = exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) #ifdef CONFIG_RETPOLINE
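	/*
	 * With retpolines enabled, the indirect call through
	 * svm_exit_handlers[] is relatively expensive, so dispatch the most
	 * common exit reasons with direct calls.
	 */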
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (exit_code == SVM_EXIT_MSR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) return msr_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) else if (exit_code == SVM_EXIT_VINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) return interrupt_window_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) else if (exit_code == SVM_EXIT_INTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) return intr_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) else if (exit_code == SVM_EXIT_HLT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) return halt_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) else if (exit_code == SVM_EXIT_NPF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) return npf_interception(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) return svm_exit_handlers[exit_code](svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) static void reload_tss(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) sd->tss_desc->type = 9; /* available 32/64-bit TSS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) load_TR_desc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) static void pre_svm_run(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (sev_guest(svm->vcpu.kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) return pre_sev_run(svm, svm->vcpu.cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) /* FIXME: handle wraparound of asid_generation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (svm->asid_generation != sd->asid_generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) new_asid(svm, sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) static void svm_inject_nmi(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) vcpu->arch.hflags |= HF_NMI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) svm_set_intercept(svm, INTERCEPT_IRET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) ++vcpu->stat.nmi_injections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) static void svm_set_irq(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) BUG_ON(!(gif_set(svm)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) ++vcpu->stat.irq_injections;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
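/*
 * Intercept CR8 writes only while the highest pending interrupt (irr) is
 * blocked by the TPR, so that KVM is notified as soon as the guest lowers
 * the TPR far enough for that interrupt to become deliverable.
 */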
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (nested_svm_virtualize_tpr(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (irr == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (tpr >= irr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
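/*
 * NMIs are blocked while GIF is clear, while the vCPU is in an interrupt
 * shadow, or while a previous NMI is still being handled (HF_NMI_MASK).
 * An NMI that would cause a nested vmexit is never considered blocked.
 */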
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) struct vmcb *vmcb = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) if (!gif_set(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) (svm->vcpu.arch.hflags & HF_NMI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (svm->nested.nested_run_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return !svm_nmi_blocked(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (masked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) svm->vcpu.arch.hflags |= HF_NMI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) svm_set_intercept(svm, INTERCEPT_IRET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) svm_clr_intercept(svm, INTERCEPT_IRET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
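/*
 * For L2 with V_INTR_MASKING set, physical interrupt delivery is governed by
 * L1's RFLAGS.IF (kept in the host save area) rather than by L2's, and a
 * vmexit taken on L1's behalf is not blocked by L2's interrupt shadow.
 */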
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) struct vmcb *vmcb = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (!gif_set(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) if (is_guest_mode(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) /* As long as interrupts are being delivered... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) /* ... vmexits aren't blocked by the interrupt shadow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (nested_exit_on_intr(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (svm->nested.nested_run_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) * e.g. if the IRQ arrived asynchronously after checking nested events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) return !svm_interrupt_blocked(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) static void enable_irq_window(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) * 1, because that's a separate STGI/VMRUN intercept. The next time we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) * get that intercept, this function will be called again though and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) * we'll get the vintr intercept. However, if the vGIF feature is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) * enabled, the STGI interception will not occur. Enable the irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) * window under the assumption that the hardware will set the GIF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (vgif_enabled(svm) || gif_set(svm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) * An IRQ window is not needed when AVIC is enabled, unless there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) * is a pending ExtINT, which cannot be injected via AVIC. In that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) * case, temporarily disable AVIC and fall back to injecting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) * IRQ via V_IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) svm_toggle_avic_for_irq_window(vcpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) svm_set_vintr(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) static void enable_nmi_window(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) == HF_NMI_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) return; /* IRET will cause a vm exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) if (!gif_set(svm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) if (vgif_enabled(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) svm_set_intercept(svm, INTERCEPT_STGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) return; /* STGI will cause a vm exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) * Something prevents the NMI from being injected. Single-step over the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) * possible problem (IRET, exception injection or interrupt shadow).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) svm->nmi_singlestep = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
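/*
 * The TSS address and identity-map address are only needed by VMX for
 * real-mode emulation; SVM has no such requirement, so both callbacks are
 * no-ops.
 */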
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) void svm_flush_tlb(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) * Flush only the current ASID even if the TLB flush was invoked via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) * unconditionally does a TLB flush on both nested VM-Enter and nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) * VM-Exit (via kvm_mmu_reset_context()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) svm->asid_generation--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
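/* INVLPGA flushes only the given guest virtual address in the current ASID. */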
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) invlpga(gva, svm->vmcb->control.asid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (nested_svm_virtualize_tpr(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) kvm_set_cr8(vcpu, cr8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) u64 cr8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) if (nested_svm_virtualize_tpr(vcpu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) kvm_vcpu_apicv_active(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) cr8 = kvm_get_cr8(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
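/*
 * Decode EXITINTINFO after #VMEXIT: an event that was still being delivered
 * when the exit occurred (NMI, hardware interrupt or exception) is re-queued
 * so it can be re-injected on the next VMRUN; a software INT3 that KVM
 * injected is instead unwound by rewinding RIP and re-executing.
 */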
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) static void svm_complete_interrupts(struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) u8 vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) u32 exitintinfo = svm->vmcb->control.exit_int_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) unsigned int3_injected = svm->int3_injected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) svm->int3_injected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) * If we've made progress since setting HF_IRET_MASK, we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) * executed an IRET and can allow NMI injection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) svm->vcpu.arch.nmi_injected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) kvm_clear_exception_queue(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) kvm_clear_interrupt_queue(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) if (!(exitintinfo & SVM_EXITINTINFO_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) case SVM_EXITINTINFO_TYPE_NMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) svm->vcpu.arch.nmi_injected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) case SVM_EXITINTINFO_TYPE_EXEPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) * In case of software exceptions, do not reinject the vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) * but re-execute the instruction instead. Rewind RIP first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) * if we emulated INT3 before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) if (kvm_exception_is_soft(vector)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (vector == BP_VECTOR && int3_injected &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) kvm_rip_write(&svm->vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) kvm_rip_read(&svm->vcpu) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) int3_injected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) u32 err = svm->vmcb->control.exit_int_info_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) kvm_requeue_exception_e(&svm->vcpu, vector, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) kvm_requeue_exception(&svm->vcpu, vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) case SVM_EXITINTINFO_TYPE_INTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) kvm_queue_interrupt(&svm->vcpu, vector, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
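/*
 * Called when a VM-entry is aborted after an event was already written to
 * EVENTINJ; stash it in EXITINTINFO so that svm_complete_interrupts()
 * re-queues it for the next entry attempt.
 */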
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) static void svm_cancel_injection(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) struct vmcb_control_area *control = &svm->vmcb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) control->exit_int_info = control->event_inj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) control->exit_int_info_err = control->event_inj_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) control->event_inj = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) svm_complete_interrupts(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
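/*
 * exit_info_1 is non-zero for WRMSR (as opposed to RDMSR), so only MSR
 * writes are candidates for fastpath handling with IRQs still disabled.
 */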
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) to_svm(vcpu)->vmcb->control.exit_info_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) return handle_fastpath_set_msr_irqoff(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) return EXIT_FASTPATH_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
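/*
 * Assembly entry point (vmenter.S): saves the host GPRs, loads the guest
 * GPRs, executes VMRUN, and stores the guest GPRs back on #VMEXIT.
 */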
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) struct vcpu_svm *svm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) * VMENTER enables interrupts (host state), but the kernel state is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) * interrupts disabled when this is invoked. Also tell RCU about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) * it. This is the same logic as for exit_to_user_mode().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) * This ensures that, e.g., latency analysis on the host observes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) * guest mode as time with interrupts enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) * guest_enter_irqoff() informs context tracking about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) * transition to guest mode and if enabled adjusts RCU state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) * accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) instrumentation_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) trace_hardirqs_on_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) lockdep_hardirqs_on_prepare(CALLER_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) instrumentation_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) guest_enter_irqoff();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) lockdep_hardirqs_on(CALLER_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) loadsegment(fs, svm->host.fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) #ifndef CONFIG_X86_32_LAZY_GS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) loadsegment(gs, svm->host.gs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) * VMEXIT disables interrupts (host state), but tracing and lockdep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) * have them in state 'on' as recorded before entering guest mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) * Same as enter_from_user_mode().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) * context_tracking_guest_exit() restores host context and reinstates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) * RCU if enabled and required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) * This needs to be done before the code below, as native_read_msr()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) * contains a tracepoint and x86_spec_ctrl_restore_host() calls into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) * other instrumentable kernel code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) lockdep_hardirqs_off(CALLER_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) context_tracking_guest_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) instrumentation_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) trace_hardirqs_off_finish();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) instrumentation_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) * Disable singlestep if we're injecting an interrupt/exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) * We don't want our modified rflags to be pushed on the stack where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) * we might not be able to easily reset them if we disabled NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) * singlestep later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) * Event injection happens before external interrupts cause a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) * vmexit and interrupts are disabled here, so smp_send_reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) * is enough to force an immediate vmexit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) disable_nmi_singlestep(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) smp_send_reschedule(vcpu->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) pre_svm_run(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) sync_lapic_to_cr8(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) svm->vmcb->save.cr2 = vcpu->arch.cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) * Run with all-zero DR6 unless needed, so that we can get the exact cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) * of a #DB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) svm_set_dr6(svm, vcpu->arch.dr6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) clgi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) kvm_load_guest_xsave_state(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) kvm_wait_lapic_expire(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) * If this vCPU has touched SPEC_CTRL, restore the guest's value if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) * it's non-zero. Since vmentry is serialising on affected CPUs, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) * is no need to worry about the conditional branch over the wrmsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) * being speculatively taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) svm_vcpu_enter_exit(vcpu, svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) * We do not use IBRS in the kernel. If this vCPU has used the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) * SPEC_CTRL MSR it may have left it on; save the value and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) * turn it off. This is much more efficient than blindly adding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) * it to the atomic save/restore list. Especially as the former
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) * For non-nested case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) * If the L01 MSR bitmap does not intercept the MSR, then we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) * save it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) * For nested case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) * If the L02 MSR bitmap does not intercept the MSR, then we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) * save it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) reload_tss(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) vcpu->arch.cr2 = svm->vmcb->save.cr2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) kvm_before_interrupt(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) kvm_load_host_xsave_state(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) stgi();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) /* Any pending NMI will happen here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) kvm_after_interrupt(&svm->vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) sync_cr8_to_lapic(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) svm->next_rip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) if (is_guest_mode(&svm->vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) sync_nested_vmcb_control(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) svm->nested.nested_run_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) vmcb_mark_all_clean(svm->vmcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) /* If the exit was due to a #PF, check for an async page fault. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) svm->vcpu.arch.apf.host_apf_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) kvm_read_and_reset_apf_flags();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (npt_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) * We need to handle MC intercepts here before the vcpu has a chance to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) * change the physical cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) if (unlikely(svm->vmcb->control.exit_code ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) SVM_EXIT_EXCP_BASE + MC_VECTOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) svm_handle_mce(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) svm_complete_interrupts(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (is_guest_mode(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) return EXIT_FASTPATH_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) return svm_exit_handlers_fastpath(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
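/*
 * With NPT the new root becomes the nested CR3; the guest-visible CR3 is
 * only written back when it is available (L2's CR3 is loaded by
 * enter_svm_guest_mode).  Without NPT the root is the shadow page table
 * root and is loaded into the VMCB's CR3 directly.
 */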
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) int root_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) unsigned long cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) cr3 = __sme_set(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (npt_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) svm->vmcb->control.nested_cr3 = cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) /* Loading L2's CR3 is handled by enter_svm_guest_mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) cr3 = vcpu->arch.cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) svm->vmcb->save.cr3 = cr3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) vmcb_mark_dirty(svm->vmcb, VMCB_CR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
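/* SVM can be disabled by firmware via the SVMDIS bit in the VM_CR MSR. */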
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) static int is_disabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) u64 vm_cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) rdmsrl(MSR_VM_CR, vm_cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) * Patch in the VMMCALL instruction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) hypercall[0] = 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) hypercall[1] = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) hypercall[2] = 0xd9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) static int __init svm_check_processor_compat(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) static bool svm_cpu_has_accelerated_tpr(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) static bool svm_has_emulated_msr(u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) switch (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) case MSR_IA32_MCG_EXT_CTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
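/*
 * Unlike EPT on VMX, guest memory types under SVM come from PAT rather than
 * from bits in the nested page table entries, so there are no memory-type
 * mask bits to report here.
 */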
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) struct kvm_cpuid_entry2 *best;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) boot_cpu_has(X86_FEATURE_XSAVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) boot_cpu_has(X86_FEATURE_XSAVES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) /* Update nrips enabled cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) /* Re-check whether INVPCID interception is required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) svm_check_invpcid(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* For sev guests, the memory encryption bit is not reserved in CR3. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) if (sev_guest(vcpu->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) if (best)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) if (!kvm_vcpu_apicv_active(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) * is exposed to the guest, disable AVIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) kvm_request_apicv_update(vcpu->kvm, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) APICV_INHIBIT_REASON_X2APIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) * Currently, AVIC does not work with nested virtualization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) kvm_request_apicv_update(vcpu->kvm, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) APICV_INHIBIT_REASON_NESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) static bool svm_has_wbinvd_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
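/*
 * Map the emulator's x86_intercept codes to the SVM exit code and the
 * instruction stage at which the corresponding intercept is checked; used
 * by svm_check_intercept() when emulating on behalf of a nested guest.
 */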
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) #define PRE_EX(exit) { .exit_code = (exit), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) .stage = X86_ICPT_PRE_EXCEPT, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) #define POST_EX(exit) { .exit_code = (exit), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) .stage = X86_ICPT_POST_EXCEPT, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) #define POST_MEM(exit) { .exit_code = (exit), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) .stage = X86_ICPT_POST_MEMACCESS, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) static const struct __x86_intercept {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) u32 exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) enum x86_intercept_stage stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) } x86_intercept_map[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) #undef PRE_EX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) #undef POST_EX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) #undef POST_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) static int svm_check_intercept(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) struct x86_instruction_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) enum x86_intercept_stage stage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) struct x86_exception *exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) int vmexit, ret = X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) struct __x86_intercept icpt_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) struct vmcb *vmcb = svm->vmcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) icpt_info = x86_intercept_map[info->intercept];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) if (stage != icpt_info.stage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) switch (icpt_info.exit_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) case SVM_EXIT_READ_CR0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (info->intercept == x86_intercept_cr_read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) icpt_info.exit_code += info->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) case SVM_EXIT_WRITE_CR0: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) unsigned long cr0, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) if (info->intercept == x86_intercept_cr_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) icpt_info.exit_code += info->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) info->intercept == x86_intercept_clts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) if (!(vmcb_is_intercept(&svm->nested.ctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) INTERCEPT_SELECTIVE_CR0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) if (info->intercept == x86_intercept_lmsw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) cr0 &= 0xfUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) val &= 0xfUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) /* lmsw can't clear PE - catch this here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) if (cr0 & X86_CR0_PE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) val |= X86_CR0_PE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (cr0 ^ val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) case SVM_EXIT_READ_DR0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) case SVM_EXIT_WRITE_DR0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) icpt_info.exit_code += info->modrm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) case SVM_EXIT_MSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (info->intercept == x86_intercept_wrmsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) vmcb->control.exit_info_1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) vmcb->control.exit_info_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) case SVM_EXIT_PAUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) * The emulator reports this intercept for plain NOP as well; PAUSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) * is REP NOP, so check for the REP prefix here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) if (info->rep_prefix != REPE_PREFIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) case SVM_EXIT_IOIO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) u64 exit_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) u32 bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (info->intercept == x86_intercept_in ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) info->intercept == x86_intercept_ins) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) exit_info = ((info->src_val & 0xffff) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) SVM_IOIO_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) bytes = info->dst_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) exit_info = (info->dst_val & 0xffff) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) bytes = info->src_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) if (info->intercept == x86_intercept_outs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) info->intercept == x86_intercept_ins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) exit_info |= SVM_IOIO_STR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) if (info->rep_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) exit_info |= SVM_IOIO_REP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) bytes = min(bytes, 4u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968)
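		/*
		 * ad_bytes (2/4/8) shifted by (SVM_IOIO_ASIZE_SHIFT - 1) sets
		 * exactly one of the A16/A32/A64 address-size bits.
		 */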
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) vmcb->control.exit_info_1 = exit_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) vmcb->control.exit_info_2 = info->next_rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (static_cpu_has(X86_FEATURE_NRIPS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) vmcb->control.next_rip = info->next_rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) vmcb->control.exit_code = icpt_info.exit_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) vmexit = nested_svm_exit_handled(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) : X86EMUL_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) {
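	/*
	 * Shrink the pause filter (PLE) window on every sched-in; skipped
	 * when PAUSE exiting is disabled for this VM.
	 */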
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) if (!kvm_pause_in_guest(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) shrink_ple_window(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) static void svm_setup_mce(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) /* [63:9] are reserved. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) vcpu->arch.mcg_cap &= 0x1ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) bool svm_smi_blocked(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) /* Per APM Vol.2 15.22.2 "Response to SMI" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (!gif_set(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) return is_smm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) struct vcpu_svm *svm = to_svm(vcpu);
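
	/* SMI injection must be deferred while a nested VMRUN is pending. */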
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) if (svm->nested.nested_run_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) return !svm_smi_blocked(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) if (is_guest_mode(vcpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) /* FED8h - SVM Guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) put_smstate(u64, smstate, 0x7ed8, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) /* FEE0h - SVM Guest VMCB Physical Address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)
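		/*
		 * Sync L2 RAX/RSP/RIP into the VMCB so the forced nested
		 * VM-exit below captures the current L2 values in vmcb12.
		 */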
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) ret = nested_svm_vmexit(svm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) struct kvm_host_map map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
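		/*
		 * 64-bit SMM state-save area fields (FED8h/FEE0h were written
		 * by svm_pre_enter_smm()): FED0h - EFER, FED8h - SVM guest
		 * flag, FEE0h - VMCB12 physical address.
		 */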
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) if (guest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (!(saved_efer & EFER_SVME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (kvm_vcpu_map(&svm->vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) if (svm_allocate_nested(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) kvm_vcpu_unmap(&svm->vcpu, &map, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) static void enable_smi_window(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) if (!gif_set(svm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) if (vgif_enabled(svm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) svm_set_intercept(svm, INTERCEPT_STGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) /* STGI will cause a vm exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) /* We must be in SMM; RSM will cause a vmexit anyway. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) bool smep, smap, is_user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) unsigned long cr4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) /* Emulation is always possible when KVM has access to all guest state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (!sev_guest(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) * Erratum:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) * the CPU microcode implementing DecodeAssist may fail to read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) * bytes of the instruction which caused the #NPF. In this case, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) * return 0 instead of the correct guest instruction bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) * This happens because the CPU microcode that reads the instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) * bytes uses a special opcode which attempts to read data with CPL=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) * privileges. The microcode reads CS:RIP and, if it hits a SMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) * fault, gives up and returns no instruction bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * Detection:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) * We reach here when the CPU supports DecodeAssist, raised #NPF and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) * returned 0 in the GuestIntrBytes field of the VMCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) * vCPU CPL==3 (otherwise the guest would have taken a SMEP fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) * instead of #NPF).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) * If vCPU CR4.SMEP=0, the erratum can be triggered at any vCPU CPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) * As most guests that enable SMAP also enable SMEP, use the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) * logic to minimize false positives when detecting the erratum while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) * still preserving semantic correctness in all cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) * Workaround:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) * To determine what instruction the guest was executing, the hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) * will have to decode the instruction at the instruction pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * For a non-SEV guest, the hypervisor can read guest memory to decode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) * the instruction at the instruction pointer when insn_len is zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) * so we return true to indicate that decoding is possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) * For an SEV guest, guest memory is encrypted with a guest-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) * key and the hypervisor cannot decode the instruction at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) * instruction pointer, so the erratum cannot be worked around. Print
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) * an error and request that the guest be killed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) if (likely(!insn || insn_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) cr4 = kvm_read_cr4(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) smep = cr4 & X86_CR4_SMEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) smap = cr4 & X86_CR4_SMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) is_user = svm_get_cpl(vcpu) == 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) if (smap && (!smep || is_user)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) * If the fault occurred in userspace, arbitrarily inject #GP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) * to avoid killing the guest and to hopefully avoid confusing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) * the guest kernel too much, e.g. injecting #PF would not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) * coherent with respect to the guest's page tables. Request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) * triple fault if the fault occurred in the kernel as there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) * no fault that KVM can inject without confusing the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) * In practice, the triple fault is moot as no sane SEV kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) * will execute from user memory while also running with SMAP=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) if (is_user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) kvm_inject_gp(vcpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) struct vcpu_svm *svm = to_svm(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) * TODO: The last condition latches INIT signals on the vCPU when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) * vCPU is in guest mode and vmcb12 intercepts INIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) * To properly emulate the INIT intercept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) * svm_check_nested_events() should call nested_svm_vmexit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) * if an INIT signal is pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) return !gif_set(svm) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) static void svm_vm_destroy(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) avic_vm_destroy(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) sev_vm_destroy(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) static int svm_vm_init(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) {
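	/*
	 * If PAUSE filtering is disabled via the module parameters, don't
	 * intercept PAUSE at all for this VM.
	 */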
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) if (!pause_filter_count || !pause_filter_thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) kvm->arch.pause_in_guest = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) if (avic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) int ret = avic_vm_init(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) kvm_apicv_init(kvm, avic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) static struct kvm_x86_ops svm_x86_ops __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) .hardware_unsetup = svm_hardware_teardown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) .hardware_enable = svm_hardware_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) .hardware_disable = svm_hardware_disable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) .has_emulated_msr = svm_has_emulated_msr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) .vcpu_create = svm_create_vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) .vcpu_free = svm_free_vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) .vcpu_reset = svm_vcpu_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) .vm_size = sizeof(struct kvm_svm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) .vm_init = svm_vm_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) .vm_destroy = svm_vm_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) .prepare_guest_switch = svm_prepare_guest_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) .vcpu_load = svm_vcpu_load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) .vcpu_put = svm_vcpu_put,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) .vcpu_blocking = svm_vcpu_blocking,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) .vcpu_unblocking = svm_vcpu_unblocking,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) .update_exception_bitmap = update_exception_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) .get_msr_feature = svm_get_msr_feature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) .get_msr = svm_get_msr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) .set_msr = svm_set_msr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) .get_segment_base = svm_get_segment_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) .get_segment = svm_get_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) .set_segment = svm_set_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) .get_cpl = svm_get_cpl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) .set_cr0 = svm_set_cr0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) .set_cr4 = svm_set_cr4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) .set_efer = svm_set_efer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) .get_idt = svm_get_idt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) .set_idt = svm_set_idt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) .get_gdt = svm_get_gdt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) .set_gdt = svm_set_gdt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) .set_dr7 = svm_set_dr7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) .cache_reg = svm_cache_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) .get_rflags = svm_get_rflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) .set_rflags = svm_set_rflags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) .tlb_flush_all = svm_flush_tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) .tlb_flush_current = svm_flush_tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) .tlb_flush_gva = svm_flush_tlb_gva,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) .tlb_flush_guest = svm_flush_tlb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) .run = svm_vcpu_run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) .handle_exit = handle_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) .skip_emulated_instruction = skip_emulated_instruction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) .update_emulated_instruction = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) .set_interrupt_shadow = svm_set_interrupt_shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) .get_interrupt_shadow = svm_get_interrupt_shadow,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) .patch_hypercall = svm_patch_hypercall,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) .set_irq = svm_set_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) .set_nmi = svm_inject_nmi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) .queue_exception = svm_queue_exception,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) .cancel_injection = svm_cancel_injection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) .interrupt_allowed = svm_interrupt_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) .nmi_allowed = svm_nmi_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) .get_nmi_mask = svm_get_nmi_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) .set_nmi_mask = svm_set_nmi_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) .enable_nmi_window = enable_nmi_window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) .enable_irq_window = enable_irq_window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) .update_cr8_intercept = update_cr8_intercept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) .set_virtual_apic_mode = svm_set_virtual_apic_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) .load_eoi_exitmap = svm_load_eoi_exitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) .hwapic_irr_update = svm_hwapic_irr_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) .hwapic_isr_update = svm_hwapic_isr_update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) .sync_pir_to_irr = kvm_lapic_find_highest_irr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) .apicv_post_state_restore = avic_post_state_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) .set_tss_addr = svm_set_tss_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) .set_identity_map_addr = svm_set_identity_map_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) .get_mt_mask = svm_get_mt_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) .get_exit_info = svm_get_exit_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) .has_wbinvd_exit = svm_has_wbinvd_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) .write_l1_tsc_offset = svm_write_l1_tsc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) .load_mmu_pgd = svm_load_mmu_pgd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) .check_intercept = svm_check_intercept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) .handle_exit_irqoff = svm_handle_exit_irqoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) .request_immediate_exit = __kvm_request_immediate_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) .sched_in = svm_sched_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) .pmu_ops = &amd_pmu_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) .nested_ops = &svm_nested_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) .deliver_posted_interrupt = svm_deliver_avic_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) .update_pi_irte = svm_update_pi_irte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) .setup_mce = svm_setup_mce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) .smi_allowed = svm_smi_allowed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) .pre_enter_smm = svm_pre_enter_smm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) .pre_leave_smm = svm_pre_leave_smm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) .enable_smi_window = enable_smi_window,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) .mem_enc_op = svm_mem_enc_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) .mem_enc_reg_region = svm_register_enc_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) .mem_enc_unreg_region = svm_unregister_enc_region,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) .can_emulate_instruction = svm_can_emulate_instruction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) .apic_init_signal_blocked = svm_apic_init_signal_blocked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) .msr_filter_changed = svm_msr_filter_changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) static struct kvm_x86_init_ops svm_init_ops __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) .cpu_has_kvm_support = has_svm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) .disabled_by_bios = is_disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) .hardware_setup = svm_hardware_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) .check_processor_compatibility = svm_check_processor_compat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) .runtime_ops = &svm_x86_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) static int __init svm_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) __unused_size_checks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) __alignof__(struct vcpu_svm), THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) static void __exit svm_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) kvm_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) module_init(svm_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) module_exit(svm_exit)