Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
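/*
 * Enlightened VMCS (eVMCS) support: accessors used when KVM itself runs
 * on Hyper-V and uses an eVMCS instead of VMREAD/VMWRITE, masks for the
 * controls eVMCSv1 does not support, and declarations for exposing eVMCS
 * to nested Hyper-V guests.
 */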
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR		= 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX			= 0x00000812,
 *	PML_ADDRESS			= 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *	GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *	HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL		= 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL					\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)

#if IS_ENABLED(CONFIG_HYPERV)

struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))

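/*
 * vmcs_field_to_evmcs_1[] is indexed by the VMCS field encoding rotated
 * left by 6 bits; look up the eVMCS offset for the given field and,
 * optionally, the clean-field bit that must be cleared on writes.
 */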
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields)) {
		WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
			  field);
		return -ENOENT;
	}

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}

#undef ROL16

static inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

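/*
 * Clear the MSR-bitmap clean-field bit so Hyper-V re-reads the bitmap on
 * the next VM-entry; only needed when the MSR-bitmap enlightenment is in
 * use for the current eVMCS.
 */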
static inline void evmcs_touch_msr_bitmap(void)
{
	if (unlikely(!current_evmcs))
		return;

	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
		current_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}

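/*
 * Point this CPU's Hyper-V VP assist page at the eVMCS and enable
 * enlightened VM-entry, so Hyper-V consumes the eVMCS contents instead
 * of a VMPTRLD'ed VMCS.
 */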
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */