// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routine in this file.
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}
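
/*
 * Expected call order (a sketch; the exact sequence is driven by the
 * VMBus driver in vmbus_drv.c and is an assumption here, not something
 * this file enforces):
 *
 *	hv_init();		// allocate the per-cpu contexts
 *	hv_synic_alloc();	// allocate SynIC message/event/post-msg pages
 *	hv_synic_init(cpu);	// per CPU, via CPU hotplug callbacks
 *	...
 *	hv_synic_cleanup(cpu);
 *	hv_synic_free();
 */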

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	/*
	 * Preemption must remain disabled until after the hypercall
	 * so some other thread can't get scheduled onto this cpu and
	 * corrupt the per-cpu post_msg_page.
	 */
	put_cpu_ptr(hv_cpu);

	return status & 0xFFFF;
}
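
/*
 * Illustrative sketch only (not called anywhere in this driver): how a
 * caller might use hv_post_message().  The connection id and message
 * type below are placeholder values picked for the example; real
 * callers such as vmbus_post_msg() in connection.c derive them from
 * protocol state.
 */
static int __maybe_unused hv_post_message_example(void)
{
	union hv_connection_id conn_id;
	u32 payload = 0;	/* hypothetical minimal payload */

	conn_id.asu32 = 0;
	conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;

	return hv_post_message(conn_id, HVMSG_NONE, &payload,
			       sizeof(payload));
}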

int hv_synic_alloc(void)
{
	int cpu;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and cleanup properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}
	}

	return 0;
err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free()
	 */
	return -ENOMEM;
}

void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
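		/*
		 * free_page() tolerates a zero address, so pages that
		 * hv_synic_alloc() never allocated (left zeroed by its
		 * up-front memset) are handled safely here.
		 */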
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}
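
/*
 * Typical usage sketch (mirrors the expected caller in vmbus_drv.c; the
 * exact call site and label names are an assumption here):
 *
 *	ret = hv_synic_alloc();
 *	if (ret)
 *		goto err_alloc;
 *	...
 * err_alloc:
 *	hv_synic_free();	// safe even after a partial failure
 */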

/*
 * hv_synic_enable_regs - Program the SynIC registers on the current CPU.
 *
 * Points the hypervisor at this CPU's message and event pages and unmasks
 * the VMBus message SINT.  Called from hv_synic_init(), which initializes
 * the Synthetic Interrupt Controller for a CPU coming online.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/*
	 * Set up the SynIC message page.  The GPA is expressed in Hyper-V
	 * page units (HV_HYP_PAGE_SHIFT), which need not match the guest's
	 * PAGE_SHIFT.
	 */
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> HV_HYP_PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

	/* Set up the SynIC event page */
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> HV_HYP_PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

	/* Set up the shared SINT */
	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.vector = hv_get_vector();
	shared_sint.masked = false;
	shared_sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	hv_set_synic_state(sctrl.as_uint64);
}
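
/*
 * Note: on x86 the hv_get_ / hv_set_ helpers used above are thin wrappers
 * around rdmsrl()/wrmsrl() of the synthetic MSRs, e.g. (from
 * arch/x86/include/asm/mshyperv.h; shown for orientation only):
 *
 *	#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
 *	#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
 */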

int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}
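
/*
 * Sketch of how these callbacks are expected to be wired up (mirrors
 * vmbus_bus_init() in vmbus_drv.c; the exact call site is an assumption
 * here):
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
 *				hv_synic_init, hv_synic_cleanup);
 */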

/*
 * hv_synic_disable_regs - Tear down the SynIC registers on the current
 * CPU; the inverse of hv_synic_enable_regs().  Called from
 * hv_synic_cleanup(), the cleanup counterpart of hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Disable the interrupt (TODO: revisit cleanup for the SMP case) */
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	hv_set_simp(simp.as_uint64);

	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	hv_set_siefp(siefp.as_uint64);

	/* Disable the global synic bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 0;
	hv_set_synic_state(sctrl.as_uint64);
}

int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU &&
	    vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * clean up.  If we find one and vmbus is still connected, we fail;
	 * this effectively prevents CPU offlining.
	 *
	 * TODO: Re-bind the channels to different CPUs.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	hv_stimer_legacy_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}