/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR, /* KVM internal error */
};
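
/*
 * Illustrative sketch (not part of this header): a caller such as
 * nested_vmx_run() is expected to dispatch on the returned status,
 * roughly along these lines:
 *
 *	status = nested_vmx_enter_non_root_mode(vcpu, true);
 *	if (status != NVMX_VMENTRY_SUCCESS)
 *		goto vmentry_failed;
 *	...
 * vmentry_failed:
 *	// VMEXIT: forward the failure to L1 as a synthesized VM-exit;
 *	// VMFAIL: emulate VMfail for L1;
 *	// KVM_INTERNAL_ERROR: propagate to userspace.
 */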

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu);
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size);

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

/*
 * Note: the same condition is checked against the state provided by userspace
 * in vmx_set_nested_state; if it is satisfied, the nested state must include
 * the VMCS12.
 */
static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If userspace does two consecutive get/set_nested_state()s while L2
	 * is running, hv_evmcs may end up not being mapped (it is mapped from
	 * nested_vmx_run()/vmx_vcpu_run()). Also check is_guest_mode(): a
	 * vmcs12 always exists while it is true.
	 */
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
	       vmx->nested.hv_evmcs;
}
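
/*
 * Illustrative sketch (assumption about the caller, not defined here):
 * vmx_get_nested_state() uses this predicate to decide whether the state
 * blob copied to userspace must carry a vmcs12, roughly:
 *
 *	if (vmx_has_valid_vmcs12(vcpu))
 *		kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
 */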

static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_eptp(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}

static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
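
/*
 * Worked example (hypothetical values): with cr0_guest_host_mask =
 * X86_CR0_TS, L1 owns the TS bit, so L2 reads TS from cr0_read_shadow
 * and every other bit from guest_cr0:
 *
 *	guest_cr0       = 0x80000031	// PG | NE | ET | PE
 *	cr0_read_shadow = 0x00000008	// TS
 *	mask            = 0x00000008	// TS
 *	nested_read_cr0() = (0x80000031 & ~0x8) | (0x8 & 0x8) = 0x80000039
 */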

static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
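
/*
 * Illustrative sketch (assumption about the caller, not defined here):
 * the VMWRITE handler consults this when L1 targets a read-only VM-exit
 * information field, roughly:
 *
 *	if (vmcs_field_readonly(field) &&
 *	    !nested_cpu_has_vmwrite_any_field(vcpu))
 *		return nested_vmx_fail(vcpu,
 *				       VMXERR_UNSUPPORTED_VMCS_COMPONENT);
 */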

static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
{
	return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

static inline bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}
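
/*
 * Illustrative sketch (assumption about the caller, not defined here):
 * event injection checks this to decide whether a pending external
 * interrupt should be forwarded to L1 as a VM-exit rather than injected
 * into L2, roughly:
 *
 *	if (nested_exit_on_intr(vcpu))
 *		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
 *				  0, 0);
 */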

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}
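
/*
 * Worked example (hypothetical values): with fixed0 = 0x80000021
 * (PG, NE and PE must be 1) and fixed1 = ~0ULL (no bit is forced to 0),
 * val = 0x80000031 passes, while val = 0x00000031 fails because PG is
 * clear:
 *
 *	((0x00000031 & ~0ULL) | 0x80000021) == 0x80000031 != 0x00000031
 */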

static inline bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

extern struct kvm_x86_nested_ops vmx_nested_ops;

#endif /* __KVM_X86_VMX_NESTED_H */