Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

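The file below is the KVM AMD SVM support header, arch/x86/kvm/svm/svm.h, as it stands in this tree at commit 8f3ce5b39.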
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include <asm/svm.h>

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
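
/*
 * Illustrative sketch, not part of the original header: these MSRs are
 * saved into vcpu_svm.host_user_msrs[] when the vCPU is loaded and
 * restored when it is put, roughly (cf. svm_vcpu_load()/svm_vcpu_put()):
 *
 *	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 *		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 */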

#define MAX_DIRECT_ACCESS_MSRS	15
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct svm_nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity, which avoids the need to scan for
	 * IRTEs and try to match the ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
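
/*
 * Illustrative sketch, not part of the original header: a VMCB field
 * update is paired with vmcb_mark_dirty() so the CPU reloads that state
 * area on the next VMRUN instead of reusing its cached copy, e.g. when
 * a new ASID is assigned:
 *
 *	svm->vmcb->control.asid = sd->next_asid++;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 */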

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}
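
/*
 * Note: while L2 is active (is_guest_mode()), svm->vmcb holds the nested
 * guest's state and L1's state lives in nested.hsave, so the intercept
 * helpers below edit L1's intercepts there; recalc_intercepts() then
 * merges them with the nested control area into the VMCB that actually
 * runs.
 */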

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
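
/*
 * Note: control->intercepts is an array of u32 words; casting it to an
 * unsigned long bitmap for the bitops above is safe only because x86 is
 * little-endian.
 */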

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
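
/*
 * Note: unlike the set/clr helpers, this queries svm->vmcb (the VMCB
 * currently being executed) rather than get_host_vmcb().
 */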

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
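
/*
 * Illustrative sketch, not part of the original header: event-blocking
 * checks key off the global interrupt flag, e.g. in svm_nmi_blocked():
 *
 *	if (!gif_set(svm))
 *		return true;
 */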

/* svm.c */
#define MSR_INVALID				0xffffffffU

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
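
/*
 * Rough flow, not part of the original header: nested_svm_exit_handled()
 * returns NESTED_EXIT_DONE for an exit that L1 intercepts, which makes
 * the caller reflect it into L1 via nested_svm_vmexit(); NESTED_EXIT_HOST
 * keeps the exit in KVM itself.
 */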

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned int nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

extern unsigned int max_sev_asid;

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}
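
/*
 * Note: with CONFIG_KVM_AMD_SEV disabled, IS_ENABLED() folds this to a
 * compile-time false; otherwise SEV is usable only if sev_hardware_setup()
 * found a nonzero max_sev_asid.
 */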

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);

#endif