Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>

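/* CPU hotplug state value returned by cpuhp_setup_state() in hyperv_init() */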
int hyperv_init_cpuhp;

void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;

u32 *hv_vp_index;
EXPORT_SYMBOL_GPL(hv_vp_index);

struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);

void __percpu **hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);

u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);

void *hv_alloc_hyperv_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);

void *hv_alloc_hyperv_zeroed_page(void)
{
	BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);

void hv_free_hyperv_page(unsigned long addr)
{
	free_page(addr);
}
EXPORT_SYMBOL_GPL(hv_free_hyperv_page);

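/*
 * CPU hotplug "online" callback: allocate the per-CPU hypercall input page,
 * record this CPU's VP index from the VP_INDEX MSR, and enable the VP assist
 * page if one is available.
 */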
static int hv_cpu_init(unsigned int cpu)
{
	u64 msr_vp_index;
	struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
	void **input_arg;
	struct page *pg;

	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
	pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
	if (unlikely(!pg))
		return -ENOMEM;
	*input_arg = page_address(pg);

	hv_get_vp_index(msr_vp_index);

	hv_vp_index[smp_processor_id()] = msr_vp_index;

	if (msr_vp_index > hv_max_vp_index)
		hv_max_vp_index = msr_vp_index;

	if (!hv_vp_assist_page)
		return 0;

	/*
	 * The VP ASSIST PAGE is an "overlay" page (see Hyper-V TLFS's Section
	 * 5.2.1 "GPA Overlay Pages"). Here it must be zeroed out to make sure
	 * we always write the EOI MSR in hv_apic_eoi_write() *after* the
	 * EOI optimization is disabled in hv_cpu_die(), otherwise a CPU may
	 * not be stopped in the case of CPU offlining and the VM will hang.
	 */
	if (!*hvp) {
		*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
	}

	if (*hvp) {
		u64 val;

		val = vmalloc_to_pfn(*hvp);
		val = (val << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) |
			HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;

		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
	}

	return 0;
}

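/*
 * Reenlightenment: after a live migration, Hyper-V emulates guest TSC
 * accesses until the guest has been notified and has adjusted to the new
 * TSC frequency. The callback below (registered via set_hv_tscchange_cb())
 * is invoked from delayed work while that emulation is in progress.
 */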
static void (*hv_reenlightenment_cb)(void);

static void hv_reenlightenment_notify(struct work_struct *dummy)
{
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	/* Don't issue the callback if TSC accesses are not emulated */
	if (hv_reenlightenment_cb && emu_status.inprogress)
		hv_reenlightenment_cb();
}
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);

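/*
 * Called by the callback's owner once it has adjusted to the new TSC
 * frequency: clear the "in progress" flag so Hyper-V stops emulating TSC
 * accesses, then re-read the TSC frequency from the MSR.
 */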
void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);

static inline bool hv_reenlightenment_available(void)
{
	/*
	 * Check for required features and privileges to make TSC frequency
	 * change notifications work.
	 */
	return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
		ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
	ack_APIC_irq();
	inc_irq_stat(irq_hv_reenlightenment_count);
	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}

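/*
 * Register a TSC-frequency-change callback and enable reenlightenment
 * notifications, which are delivered to the current CPU via
 * HYPERV_REENLIGHTENMENT_VECTOR.
 */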
void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("Hyper-V: reenlightenment support is unavailable\n");
		return;
	}

	if (!hv_vp_index)
		return;

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	re_ctrl.target_vp = hv_vp_index[get_cpu()];

	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));

	put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);

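/* Disable reenlightenment notifications and unregister the callback. */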
void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);

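/*
 * CPU hotplug "offline" callback (also called from hv_suspend()): free the
 * per-CPU hypercall input page, disable the VP assist page, and move any
 * reenlightenment notifications targeting this CPU to another online CPU.
 */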
static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	unsigned long flags;
	void **input_arg;
	void *input_pg = NULL;

	local_irq_save(flags);
	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
	input_pg = *input_arg;
	*input_arg = NULL;
	local_irq_restore(flags);
	free_page((unsigned long)input_pg);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu])
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, 0);

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign reenlightenment notifications to some other online
		 * CPU or just disable the feature if there are no online CPUs
		 * left (happens on hibernation).
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		if (new_cpu < nr_cpu_ids)
			re_ctrl.target_vp = hv_vp_index[new_cpu];
		else
			re_ctrl.enabled = 0;

		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}

static int __init hv_pci_init(void)
{
	int gen2vm = efi_enabled(EFI_BOOT);

	/*
	 * For Generation-2 VM, we exit from pci_arch_init() by returning 0.
	 * The purpose is to suppress the harmless warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	if (gen2vm)
		return 0;

	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	return 1;
}

static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_hypercall_pg = NULL;

	/* Disable the hypercall page in the hypervisor */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	ret = hv_cpu_die(0);
	return ret;
}

static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	ret = hv_cpu_init(0);
	WARN_ON(ret);

	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_hypercall_pg = hv_hypercall_pg_saved;
	hv_hypercall_pg_saved = NULL;

	/*
	 * Reenlightenment notifications are disabled by hv_cpu_die(0);
	 * re-enable them here if hv_reenlightenment_cb was previously set.
	 */
	if (hv_reenlightenment_cb)
		set_hv_tscchange_cb(hv_reenlightenment_cb);
}

/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};

static void (* __initdata old_setup_percpu_clockev)(void);

static void __init hv_stimer_setup_percpu_clockev(void)
{
	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc();

	/*
	 * Still register the LAPIC timer, because the direct-mode STIMER is
	 * not supported by old versions of Hyper-V. This also allows users
	 * to switch to LAPIC timer via /sys, if they want to.
	 */
	if (old_setup_percpu_clockev)
		old_setup_percpu_clockev();
}

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 * 3. Setup Hyper-V specific APIC entry points.
 */
void __init hyperv_init(void)
{
	u64 guest_id, required_msrs;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int cpuhp, i;

	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return;

	/* Absolutely required MSRs */
	required_msrs = HV_MSR_HYPERCALL_AVAILABLE |
		HV_MSR_VP_INDEX_AVAILABLE;

	if ((ms_hyperv.features & required_msrs) != required_msrs)
		return;

	/*
	 * Allocate the per-CPU state for the hypercall input arg.
	 * If this allocation fails, we will not be able to set up the
	 * (per-CPU) hypercall input page, and thus this failure is
	 * fatal on Hyper-V.
	 */
	hyperv_pcpu_input_arg = alloc_percpu(void *);

	BUG_ON(hyperv_pcpu_input_arg == NULL);

	/* Allocate percpu VP index */
	hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
				    GFP_KERNEL);
	if (!hv_vp_index)
		return;

	for (i = 0; i < num_possible_cpus(); i++)
		hv_vp_index[i] = VP_INVAL;

	hv_vp_assist_page = kcalloc(num_possible_cpus(),
				    sizeof(*hv_vp_assist_page), GFP_KERNEL);
	if (!hv_vp_assist_page) {
		ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
		goto free_vp_index;
	}

	cpuhp = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
				  hv_cpu_init, hv_cpu_die);
	if (cpuhp < 0)
		goto free_vp_assist_page;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
			VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
			VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
			__builtin_return_address(0));
	if (hv_hypercall_pg == NULL) {
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
		goto remove_cpuhp_state;
	}

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/*
	 * hyperv_init() is called before LAPIC is initialized: see
	 * apic_intr_mode_init() -> x86_platform.apic_post_init() and
	 * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
	 * depends on LAPIC, so hv_stimer_alloc() should be called from
	 * x86_init.timers.setup_percpu_clockev.
	 */
	old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
	x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;

	hv_apic_init();

	x86_init.pci.arch_init = hv_pci_init;

	register_syscore_ops(&hv_syscore_ops);

	hyperv_init_cpuhp = cpuhp;
	return;

remove_cpuhp_state:
	cpuhp_remove_state(cpuhp);
free_vp_assist_page:
	kfree(hv_vp_assist_page);
	hv_vp_assist_page = NULL;
free_vp_index:
	kfree(hv_vp_index);
	hv_vp_index = NULL;
}

/*
 * This routine is called before kexec/kdump; it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	unregister_syscore_ops(&hv_syscore_ops);

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	/*
	 * Clear the hypercall page reference before resetting the page,
	 * so that hypercall operations fail safely rather than
	 * panicking the kernel over an invalid hypercall page.
	 */
	hv_hypercall_pg = NULL;

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

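/*
 * Report a kernel panic (or, with panic_on_oops, a die) to Hyper-V by
 * writing the error code, guest ID and key registers into the crash
 * parameter MSRs and then signalling HV_CRASH_CTL_CRASH_NOTIFY.
 */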
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 guest_id;

	if (in_die && !panic_on_oops)
		return;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

/**
 * hyperv_report_panic_msg - report panic message to Hyper-V
 * @pa: physical address of the panic page containing the message
 * @size: size of the message in the page
 */
void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
{
	/*
	 * P3 holds the physical address of the panic page and P4 holds the
	 * size of the panic data in that page. The rest of the crash
	 * registers are ignored when the NOTIFY_MSG flag is set.
	 */
	wrmsrl(HV_X64_MSR_CRASH_P0, 0);
	wrmsrl(HV_X64_MSR_CRASH_P1, 0);
	wrmsrl(HV_X64_MSR_CRASH_P2, 0);
	wrmsrl(HV_X64_MSR_CRASH_P3, pa);
	wrmsrl(HV_X64_MSR_CRASH_P4, size);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL,
	       (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);

bool hv_is_hyperv_initialized(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/*
	 * Ensure that we're really on Hyper-V, and not a KVM or Xen
	 * emulation of Hyper-V
	 */
	if (x86_hyper_type != X86_HYPER_MS_HYPERV)
		return false;

	/*
	 * Verify that earlier initialization succeeded by checking
	 * that the hypercall page is set up.
	 */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);

bool hv_is_hibernation_supported(void)
{
	return acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);