^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #include <linux/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <xen/events.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "xen-ops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "smp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
/*
 * Prepare the boot CPU for SMP operation in a Xen HVM guest: run the
 * native preparation, then register the boot CPU's vcpu_info with Xen
 * and switch the kernel to the Xen pv spinlock implementation.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	/* Must run on the boot processor. */
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the SMP bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) native_smp_prepare_cpus(max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) if (xen_have_vector_callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) WARN_ON(xen_smp_intr_init(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) xen_init_lock_cpu(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) if (cpu == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /* Set default vcpu_id to make sure that we don't use cpu-0's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Handle a CPU going offline: after the common die handling succeeds,
 * release the Xen per-cpu resources (SMP interrupts, pv spinlock state,
 * timer) when a vector callback is in use.
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) != 0)
		return;

	if (!xen_have_vector_callback)
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* Without CPU hotplug support a CPU must never be taken offline. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) void __init xen_hvm_smp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) smp_ops.smp_cpus_done = xen_smp_cpus_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) smp_ops.cpu_die = xen_hvm_cpu_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (!xen_have_vector_callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #ifdef CONFIG_PARAVIRT_SPINLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) nopvspin = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }