Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Xen SMP support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * This file implements the Xen versions of smp_ops.  SMP under Xen is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * very straightforward.  Bringing a CPU up is simply a matter of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * loading its initial context and setting it running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * IPIs are handled through the Xen event mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * Because virtual CPUs can be scheduled onto any real CPU, there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * useful topology information for the kernel to make use of.  As a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * result, all CPUs are treated as if they're single-core and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * single-threaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/irq_work.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/tick.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/nmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/cpuhotplug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/stackprotector.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/paravirt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <asm/idtentry.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <asm/desc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <asm/io_apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <xen/interface/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <xen/interface/vcpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <xen/interface/xenpmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <asm/spec-ctrl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <asm/xen/interface.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <asm/xen/hypercall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include <xen/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <xen/events.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include <xen/hvc-console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include "xen-ops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include "mmu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include "smp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include "pmu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) cpumask_var_t xen_cpu_initialized_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) void asm_cpu_bringup_and_idle(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
/*
 * Late per-CPU bringup, executed on the newly started VCPU itself.
 *
 * Initialises CR4 and generic per-cpu state, records CPU data, wires up
 * the sibling maps and the Xen clockevent device, then marks the CPU
 * online.  Interrupts are enabled only at the very end, once the CPU is
 * officially up.
 */
static void cpu_bringup(void)
{
	int cpu;

	cr4_init();
	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
		xen_enable_sysenter();
		xen_enable_syscall();
	}
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	/* Xen VCPUs are presented as single-core (see file header comment). */
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	speculative_store_bypass_ht_init();

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);

	cpu_set_state_online(cpu);  /* Implies full memory barrier. */

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
/*
 * C entry point for a secondary VCPU - presumably reached from the
 * asm_cpu_bringup_and_idle stub that cpu_initialize_context() installs
 * as the VCPU's initial instruction pointer (TODO confirm against the
 * asm side).  Finishes bringup and drops into the idle loop.
 */
asmlinkage __visible void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) void xen_smp_intr_free_pv(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		per_cpu(xen_irq_work, cpu).irq = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		kfree(per_cpu(xen_irq_work, cpu).name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		per_cpu(xen_irq_work, cpu).name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		per_cpu(xen_pmu_irq, cpu).irq = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		kfree(per_cpu(xen_pmu_irq, cpu).name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		per_cpu(xen_pmu_irq, cpu).name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) int xen_smp_intr_init_pv(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	char *callfunc_name, *pmu_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 				    cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 				    xen_irq_work_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 				    IRQF_PERCPU|IRQF_NOBALANCING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 				    callfunc_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 				    NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	per_cpu(xen_irq_work, cpu).irq = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	per_cpu(xen_irq_work, cpu).name = callfunc_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	if (is_xen_pmu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 					     xen_pmu_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 					     IRQF_PERCPU|IRQF_NOBALANCING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 					     pmu_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 		per_cpu(xen_pmu_irq, cpu).irq = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	xen_smp_intr_free_pv(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static void __init _get_smp_config(unsigned int early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	unsigned int subtract = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	if (early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	num_processors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	disabled_cpus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	for (i = 0; i < nr_cpu_ids; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 		if (rc >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 			num_processors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 			set_cpu_possible(i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 			set_cpu_possible(i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 			set_cpu_present(i, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 			subtract++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	/* This is akin to using 'nr_cpus' on the Linux command line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	 * Which is OK as when we use 'dom0_max_vcpus=X' we can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	 * have up to X, while nr_cpu_ids is greater than X. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	 * normally is not a problem, except when CPU hotplugging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	 * is involved and then there might be more than X CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	 * in the guest - which will not work as there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	 * hypercall to expand the max number of VCPUs an already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	 * running guest has. So cap it up to X. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	if (subtract)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		nr_cpu_ids = nr_cpu_ids - subtract;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
/*
 * Boot-CPU preparation hook (smp_ops.smp_prepare_boot_cpu).
 *
 * Runs the native preparation first, then performs the Xen-specific
 * fixups: recycling the early GDT page, placing vcpu_info, and setting
 * up the paravirt spinlocks early enough that core-kernel alternative
 * patching still covers them.
 */
static void __init xen_pv_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (!xen_feature(XENFEAT_writable_page_tables))
		/* We've switched to the "real" per-cpu gdt, so make
		 * sure the old memory can be recycled. */
		make_lowmem_page_readwrite(xen_initial_gdt);

	xen_setup_vcpu_info_placement();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup up code is activated. Hence we need to set this up
	 * the core kernel is being patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	unsigned cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	if (skip_ioapic_setup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		char *m = (max_cpus == 0) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 			"The nosmp parameter is incompatible with Xen; " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 			"use Xen dom0_max_vcpus=1 parameter" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 			"The noapic parameter is incompatible with Xen";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		xen_raw_printk(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		panic(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	xen_init_lock_cpu(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	smp_store_boot_cpu_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	cpu_data(0).x86_max_cores = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	set_cpu_sibling_map(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	speculative_store_bypass_ht_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	xen_pmu_init(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 	if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		panic("could not allocate xen_cpu_initialized_map\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	/* Restrict the possible_map according to max_cpus. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 		set_cpu_possible(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		set_cpu_present(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/*
 * Build the initial vcpu_guest_context for @cpu and hand it to the
 * hypervisor via VCPUOP_initialise.  Idempotent per CPU: if the CPU is
 * already marked in xen_cpu_initialized_map this returns 0 immediately.
 *
 * Returns 0 on success, -ENOMEM if the context cannot be allocated;
 * BUG()s if the hypercall itself fails.
 */
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_rw(cpu);

	/* NOTE(review): ctxt was kzalloc()ed above, so this memset is
	 * redundant (though harmless). */
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	/*
	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
	 * pointing just below where pt_regs would be if it were a normal
	 * kernel entry.
	 */
	ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	/* The GDT frame passed to Xen must be page-aligned. */
	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	/*
	 * Xen requires the GDT page to be read-only in the guest; make
	 * both the direct virtual address and the MFN-derived alias RO.
	 */
	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	/*
	 * Set SS:SP that Xen will use when entering guest kernel mode
	 * from guest user mode.  Subsequent calls to load_sp0() can
	 * change this value.
	 */
	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = task_top_of_stack(idle);

	ctxt->gs_base_kernel = per_cpu_offset(cpu);
	ctxt->event_callback_eip    =
		(unsigned long)xen_asm_exc_xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	/* New VCPU starts on the swapper page tables. */
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	/* Xen keeps its own copy; the local context is no longer needed. */
	kfree(ctxt);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
/*
 * Bring up a secondary CPU (smp_ops.cpu_up): initialise its context,
 * tell the hypervisor to run it via VCPUOP_up, then yield until the
 * new CPU reports itself CPU_ONLINE.
 *
 * Returns 0 on success or a negative errno from one of the preparation
 * steps.
 */
static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	rc = common_cpu_up(cpu, idle);
	if (rc)
		return rc;

	xen_setup_runstate_info(cpu);

	/*
	 * PV VCPUs are always successfully taken down (see 'while' loop
	 * in xen_cpu_die()), so -EBUSY is an error.
	 */
	rc = cpu_check_up_prepare(cpu);
	if (rc)
		return rc;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	xen_pmu_init(cpu);

	/* VCPUOP_up is expected to succeed for a PV guest. */
	rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
	BUG_ON(rc);

	while (cpu_report_state(cpu) != CPU_ONLINE)
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) static int xen_pv_cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	if (cpu == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 	cpu_disable_common();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	load_cr3(swapper_pg_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
/*
 * Finish taking @cpu down, called on a surviving CPU: poll the
 * hypervisor until the VCPU has actually gone offline, then release
 * its per-cpu interrupts, spinlock state, timer and PMU resources.
 */
static void xen_pv_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
				  xen_vcpu_nr(cpu), NULL)) {
		/* Sleep ~100ms between polls instead of busy-waiting. */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
		xen_pmu_finish(cpu);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
/*
 * Executed on the dying CPU itself: ask Xen to take the VCPU down via
 * VCPUOP_down.  Unlike native play_dead, the VCPU can be brought back
 * by VCPUOP_up, at which point execution resumes right here - so we
 * re-run cpu_bringup() and repair the NOHZ idle state before returning
 * to the idle loop.
 */
static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
	tick_nohz_idle_stop_tick_protected();

	cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) #else /* !CONFIG_HOTPLUG_CPU */
/* Without CPU hotplug, offlining a CPU is simply unsupported. */
static int xen_pv_cpu_disable(void)
{
	return -ENOSYS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
/* Must never be reached when CONFIG_HOTPLUG_CPU is disabled. */
static void xen_pv_cpu_die(unsigned int cpu)
{
	BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
/* Must never be reached when CONFIG_HOTPLUG_CPU is disabled. */
static void xen_pv_play_dead(void)
{
	BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) #endif
/*
 * smp_call_function() callback run on each CPU being stopped: detach
 * from any pinned page tables, mark the CPU offline, and ask Xen to
 * take the VCPU down.  VCPUOP_down does not return on success, hence
 * the trailing BUG().
 */
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
	BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 
/*
 * Stop all CPUs except the caller (smp_ops.stop_other_cpus) by running
 * stop_self() on each of them; @wait selects synchronous completion.
 */
static void xen_pv_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
/*
 * Handler for the XEN_IRQ_WORK_VECTOR IPI: run any pending irq_work
 * items inside an irq_enter()/irq_exit() bracket and account the event
 * in the apic_irq_work_irqs statistics.
 */
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
/*
 * PV smp_ops table: Xen-specific bringup/teardown hooks plus the
 * shared xen_smp_* IPI helpers.  Installed into the global smp_ops in
 * xen_smp_init() below.
 */
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_pv_cpu_up,
	.cpu_die = xen_pv_cpu_die,
	.cpu_disable = xen_pv_cpu_disable,
	.play_dead = xen_pv_play_dead,

	.stop_other_cpus = xen_pv_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 
/*
 * Install the Xen PV smp_ops and replace the MP-table probing hooks:
 * CPU discovery comes from the hypervisor (see _get_smp_config), not
 * from BIOS tables.
 */
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;

	/* Avoid searching for BIOS MP tables */
	x86_init.mpparse.find_smp_config = x86_init_noop;
	x86_init.mpparse.get_smp_config = _get_smp_config;
}