/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.hyperv;
}

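/*
 * struct kvm_vcpu_hv is embedded in struct kvm_vcpu_arch, which in turn is
 * embedded in struct kvm_vcpu, so two container_of() steps walk back out to
 * the enclosing vCPU.
 */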
static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
{
	struct kvm_vcpu_arch *arch;

	arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv);
	return container_of(arch, struct kvm_vcpu, arch);
}

static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.hyperv.synic;
}

static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
}

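/*
 * The synthetic debugger state is per-VM (reached via vcpu->kvm->arch.hyperv),
 * unlike the SynIC and stimer state above, which is per-vCPU.
 */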
static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

bool kvm_hv_hypercall_enabled(struct kvm *kvm);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page);

static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
							int timer_index)
{
	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
}

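/*
 * stimer->index is the timer's slot in the per-vCPU stimer[] array, so
 * "stimer - stimer->index" points back at stimer[0]; from there,
 * container_of() recovers the enclosing kvm_vcpu_hv and hence the vCPU.
 */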
static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu_to_vcpu(hv_vcpu);
}

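/* True if any of the vCPU's synthetic timers has an expiration pending. */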
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
				struct kvm_cpuid_entry2 __user *entries);

#endif /* __ARCH_X86_KVM_HYPERV_H__ */