Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

File: arch/x86/kvm/x86.h — every line last modified in commit 8f3ce5b39 (kx, 2023-10-28).
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

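/*
 * Grow the PLE (pause-loop exiting) window: a zero modifier resets the
 * window to its base value; a modifier smaller than the base scales the
 * window, otherwise it is added; the result is clamped to max.
 */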
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

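/*
 * Shrink the PLE window: a zero modifier resets the window to its base
 * value; a modifier smaller than the base divides the window, otherwise
 * it is subtracted; the result is clamped to min.
 */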
static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

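/* Architectural power-on value of IA32_PAT: WB, WT, UC-, UC, mirrored in the upper four entries. */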
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

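/*
 * True if an exception, interrupt or NMI was already injected into the
 * guest and still needs to be re-injected, e.g. because delivery was
 * interrupted by a VM-exit.
 */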
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

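/*
 * True when the vCPU is in 64-bit mode proper: long mode is active
 * (EFER.LMA) and the current code segment has CS.L set, i.e. not
 * compatibility mode.
 */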
static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

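/*
 * Vectors that push an error code on the stack: #DF, #TS, #NP, #SS,
 * #GP, #PF and #AC.
 */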
static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops.tlb_flush_current(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

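/*
 * Canonical form: sign-extend the address from the guest's implemented
 * virtual address width (48 or 57 bits). An address is non-canonical if
 * the sign extension changes it.
 */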
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

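/*
 * INIT signals are latched (held pending) while the vCPU is in SMM or
 * when the vendor module reports that INIT delivery is blocked.
 */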
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

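/*
 * Each PAT entry must be one of the defined memory types 0, 1, 4, 5, 6
 * or 7; the second check rejects the reserved encodings 2 and 3.
 */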
static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}
static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

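/*
 * Compute the set of CR4 bits that must stay clear for the given CPU
 * capabilities: the architecturally reserved bits plus every
 * feature-controlled bit whose feature is not available.
 */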
#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})

#endif