// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
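
/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * caller. On UP only CPU 0 exists, so any other cpu argument fails with
 * -ENXIO, and the wait argument is irrelevant because the call is always
 * synchronous. example_set_flag and example_flag are made-up names.
 *
 *	static void example_set_flag(void *info)
 *	{
 *		*(bool *)info = true;	// runs on CPU 0 with IRQs disabled
 *	}
 *
 *	bool example_flag = false;
 *	int err = smp_call_function_single(0, example_set_flag,
 *					   &example_flag, 1);
 */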

int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
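
/*
 * Example (editor's sketch): despite the "async" name, on UP the callback
 * runs synchronously, right here, with IRQs disabled. Assigning csd->func
 * and csd->info directly is an assumption matching this kernel generation;
 * later kernels also offer an INIT_CSD() helper in <linux/smp.h>.
 * example_csd is made up and reuses the handler from the sketch above.
 *
 *	static struct __call_single_data example_csd;
 *
 *	example_csd.func = example_set_flag;
 *	example_csd.info = &example_flag;
 *	smp_call_function_single_async(0, &example_csd);
 */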

void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(on_each_cpu);
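
/*
 * Example (editor's sketch): on_each_cpu() takes no cpu argument and has
 * no return value; on UP it degenerates to one call on CPU 0 with IRQs
 * disabled, reusing the handler from the first sketch.
 *
 *	on_each_cpu(example_set_flag, &example_flag, 1);
 */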

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
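
/*
 * Example (editor's sketch): the mask really is honoured on UP, as the
 * comment above explains. cpumask_set_cpu() and a zero-initialised
 * cpumask are stock kernel facilities; example_mask is a made-up name.
 *
 *	static struct cpumask example_mask;	// static, so starts empty
 *
 *	on_each_cpu_mask(&example_mask, example_set_flag,
 *			 &example_flag, true);	// no-op: CPU 0 not in mask
 *	cpumask_set_cpu(0, &example_mask);
 *	on_each_cpu_mask(&example_mask, example_set_flag,
 *			 &example_flag, true);	// runs func on CPU 0
 */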

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
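
/*
 * Example (editor's sketch): cond_func is consulted exactly once, for
 * CPU 0, with preemption disabled; note that the mask argument is never
 * consulted in this UP implementation. example_should_run is made up.
 *
 *	static bool example_should_run(int cpu, void *info)
 *	{
 *		return !*(bool *)info;	// only call func while flag unset
 *	}
 *
 *	on_each_cpu_cond_mask(example_should_run, example_set_flag,
 *			      &example_flag, true, cpu_online_mask);
 */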

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
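
/*
 * Example (editor's sketch): unlike the void callbacks above, func here
 * returns an int that smp_call_on_cpu() propagates to the caller. With
 * phys == true the hypervisor is asked to pin vCPU 0 to its physical CPU
 * around the call; hypervisor_pin_vcpu() is a no-op unless the platform
 * installs a pin_vcpu hook. example_query is a made-up name.
 *
 *	static int example_query(void *par)
 *	{
 *		return 0;	// e.g. read platform state on this CPU
 *	}
 *
 *	int ret = smp_call_on_cpu(0, example_query, NULL, true);
 */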