// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/suspend.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT	(5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
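
/*
 * Illustrative sketch of the CSD_FLAG_LOCK life cycle (not part of the
 * original file; some_func/some_info are placeholders). The owner takes the
 * lock, fills in the request and queues it; the destination CPU releases
 * the lock from its IPI handler:
 *
 *	csd_lock(csd);				// waits out any previous user
 *	csd->func = some_func;
 *	csd->info = some_info;
 *	__smp_call_single_queue(cpu, &csd->llist);
 *
 * flush_smp_call_function_queue() on the destination then calls
 * csd_unlock(): after running ->func for CSD_TYPE_SYNC entries, or before
 * running ->func for CSD_TYPE_ASYNC entries (see below).
 */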

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();

	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->src = smp_processor_id();
	csd->dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
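
/*
 * Usage sketch for smp_call_function_single() (illustrative only, not part
 * of the original file; read_remote_count() and the per-CPU counter it
 * reads are hypothetical):
 *
 *	static void read_remote_count(void *info)
 *	{
 *		unsigned long *val = info;
 *
 *		*val = this_cpu_read(example_counter);
 *	}
 *
 *	unsigned long val = 0;
 *	int err = smp_call_function_single(cpu, read_remote_count, &val, 1);
 *
 * With @wait == 1 the call returns only after read_remote_count() has run
 * on @cpu (or with -ENXIO if @cpu is not online), so @val may safely live
 * on the caller's stack.
 */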

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *			         specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If this function is called with a csd that has not yet been processed
 * by a previous call to smp_call_function_single_async(), it returns
 * immediately with -EBUSY to indicate that the csd object is still in
 * use.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
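
/*
 * Usage sketch for smp_call_function_single_async() (illustrative only, not
 * part of the original file; struct poke_work and poke_remote() are
 * hypothetical). The csd is embedded in a longer-lived object so it stays
 * valid until the destination CPU has consumed it:
 *
 *	struct poke_work {
 *		call_single_data_t csd;	// set up once: csd.func = poke_remote, csd.info = w
 *		atomic_t pokes;
 *	} *w;
 *
 *	// Possibly from hard-IRQ context:
 *	if (smp_call_function_single_async(cpu, &w->csd) == -EBUSY)
 *		;	// previous request still pending; caller must serialize
 */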

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
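
/*
 * Usage sketch for smp_call_function_any() (illustrative only, not part of
 * the original file; drain_node_cache() is hypothetical). Any CPU of the
 * given mask will do, so the selection preference above lets the call stay
 * on the current CPU or node when possible and avoid a cross-node IPI:
 *
 *	ret = smp_call_function_any(cpumask_of_node(nid), drain_node_cache,
 *				    NULL, 1);
 *	if (ret)
 *		pr_warn("no online CPU on node %d\n", nid);
 */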

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this CPU and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->src = smp_processor_id();
		csd->dst = cpu;
#endif
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
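
/*
 * Usage sketch for smp_call_function_many() (illustrative only, not part of
 * the original file; target_mask and do_sync_example() are hypothetical).
 * This function deliberately skips the calling CPU, so callers that also
 * need the local CPU handled invoke @func directly while preemption is off:
 *
 *	preempt_disable();
 *	smp_call_function_many(target_mask, do_sync_example, NULL, true);
 *	do_sync_example(NULL);		// current CPU is not in the IPI fan-out
 *	preempt_enable();
 */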

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target CPU calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is a hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
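
/*
 * Example kernel command lines for the parameters above (illustrative only,
 * not part of the original file):
 *
 *	nosmp			boot with a single CPU, SMP support disabled
 *	maxcpus=4		bring up at most 4 CPUs at boot
 *	nr_cpus=8		cap nr_cpu_ids (possible CPU IDs) at 8
 *
 * "maxcpus" only limits how many CPUs are activated during boot, while
 * "nr_cpus" caps the ID space itself, so CPUs above that cap cannot be
 * onlined later.
 */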
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) /* Setup number of possible processor ids */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) EXPORT_SYMBOL(nr_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) void __init setup_nr_cpu_ids(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) /* Called by boot processor to activate the rest. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) void __init smp_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int num_nodes, num_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) idle_threads_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cpuhp_threads_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pr_info("Bringing up secondary CPUs ...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) bringup_nonboot_cpus(setup_max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) num_nodes = num_online_nodes();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) num_cpus = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) pr_info("Brought up %d node%s, %d CPU%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) num_nodes, (num_nodes > 1 ? "s" : ""),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) num_cpus, (num_cpus > 1 ? "s" : ""));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Any cleanup work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) smp_cpus_done(setup_max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * Call a function on all processors. May be used during early boot while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * early_boot_irqs_disabled is set, which is why the local invocation uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * local_irq_save/restore() rather than local_irq_disable/enable().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) void on_each_cpu(smp_call_func_t func, void *info, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) smp_call_function(func, info, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) func(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) EXPORT_SYMBOL(on_each_cpu);
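/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of a typical on_each_cpu() caller that drains a per-CPU counter on
 * every online CPU.  All "example_*" names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_drain_hits(void *info)
{
	atomic_long_t *total = info;

	/* Runs with interrupts disabled on each CPU, including this one. */
	atomic_long_add(this_cpu_xchg(example_hits, 0), total);
}

static unsigned long example_collect_hits(void)
{
	atomic_long_t total = ATOMIC_LONG_INIT(0);

	/* wait == 1: return only after the callback has run everywhere. */
	on_each_cpu(example_drain_hits, &total, 1);

	return atomic_long_read(&total);
}
#endif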
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * on_each_cpu_mask(): Run a function on processors specified by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * cpumask, which may include the local processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * @mask: The set of cpus to run on (only runs on online subset).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * @func: The function to run. This must be fast and non-blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * @info: An arbitrary pointer to pass to the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * @wait: If true, wait (atomically) until function has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * on other CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * If @wait is true, then returns once @func has returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * You must not call this function with disabled interrupts or from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * hardware interrupt handler or from a bottom half handler. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * exception is that it may be used during early boot while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * early_boot_irqs_disabled is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) void *info, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) smp_call_function_many(mask, func, info, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (cpumask_test_cpu(cpu, mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) func(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) EXPORT_SYMBOL(on_each_cpu_mask);
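/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of restricting the callback to an explicit set of CPUs with
 * on_each_cpu_mask().  The callback and CPU numbers are hypothetical; offline
 * CPUs in the mask are simply skipped.
 */
#if 0
static void example_poke(void *info)
{
	pr_info("poked on CPU%d\n", smp_processor_id());
}

static int example_poke_two_cpus(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);

	/* wait=true: do not return until example_poke() ran on each CPU. */
	on_each_cpu_mask(mask, example_poke, NULL, true);

	free_cpumask_var(mask);
	return 0;
}
#endif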
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * on_each_cpu_cond_mask(): Call a function on each processor in @mask for which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * the supplied function cond_func returns true, optionally waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * for all the required CPUs to finish. This may include the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * @cond_func: A callback function that is passed a cpu id and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * the info parameter. The function is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * with preemption disabled. The function should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) *			return a boolean value indicating whether to IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * the specified CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @func: The function to run on all applicable CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * This must be fast and non-blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @info: An arbitrary pointer to pass to both functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * @wait: If true, wait (atomically) until function has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * completed on other CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Preemption is disabled to protect against CPUs going offline but not online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * CPUs going online during the call will not be seen or sent an IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * You must not call this function with disabled interrupts or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * from a hardware interrupt handler or from a bottom half handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) void *info, bool wait, const struct cpumask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) int cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) smp_call_function_many_cond(mask, func, info, wait, cond_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) func(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) EXPORT_SYMBOL(on_each_cpu_cond_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) void *info, bool wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) EXPORT_SYMBOL(on_each_cpu_cond);
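/*
 * Example (editorial illustration, not part of the original file): a minimal
 * sketch of the conditional variant, which only IPIs CPUs whose
 * (hypothetical) per-CPU "dirty" flag is set, so CPUs with nothing to do are
 * not disturbed.
 */
#if 0
static DEFINE_PER_CPU(bool, example_dirty);

static bool example_is_dirty(int cpu, void *info)
{
	/* Called with preemption disabled; a slightly stale answer is fine. */
	return per_cpu(example_dirty, cpu);
}

static void example_clean(void *info)
{
	this_cpu_write(example_dirty, false);
}

static void example_clean_dirty_cpus(void)
{
	on_each_cpu_cond(example_is_dirty, example_clean, NULL, true);
}
#endif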
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static void do_nothing(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * kick_all_cpus_sync - Force all cpus out of idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * Used to synchronize the update of the pm_idle function pointer. It's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * called after the pointer is updated and returns after the dummy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * callback function has been executed on all cpus. The execution of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * the function can only happen on the remote cpus after they have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * left the idle function which had been called via the pm_idle function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * pointer. So it's guaranteed that nothing uses the previous pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) void kick_all_cpus_sync(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* Make sure the change is visible before we kick the cpus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) smp_call_function(do_nothing, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
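/*
 * Example (editorial illustration, not part of the original file): the
 * intended pattern, per the comment above, is "publish the new pointer, then
 * kick".  The hook below is hypothetical.
 */
#if 0
static void (*example_idle_hook)(void);

static void example_set_idle_hook(void (*new_hook)(void))
{
	WRITE_ONCE(example_idle_hook, new_hook);

	/*
	 * Once this returns, every other online CPU has executed the dummy
	 * IPI callback and has therefore left any code that could still
	 * have been using the old hook value.
	 */
	kick_all_cpus_sync();
}
#endif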
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * wake_up_all_idle_cpus - break all cpus out of idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * wake_up_all_idle_cpus tries to wake up every cpu that is in the idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * state, including cpus that are idle polling; cpus that are not idle are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * left alone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) void wake_up_all_idle_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (cpu == smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) #if IS_ENABLED(CONFIG_SUSPEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (s2idle_state == S2IDLE_STATE_ENTER || cpu_active(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) wake_up_if_idle(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
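/*
 * Example (editorial illustration, not part of the original file): a typical
 * use is to change some state that the idle loop consults and then wake idle
 * CPUs so they re-read it promptly.  The flag below is hypothetical.
 */
#if 0
static bool example_idle_disabled;

static void example_disable_deep_idle(void)
{
	WRITE_ONCE(example_idle_disabled, true);

	/* Idle CPUs return from their idle routine and notice the flag. */
	wake_up_all_idle_cpus();
}
#endif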
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * wake_up_all_online_idle_cpus - break all online cpus out of idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * wake_up_all_online_idle_cpus tries to wake up every online cpu that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * in the idle state, including cpus that are idle polling; cpus that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * not idle are left alone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) void wake_up_all_online_idle_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (cpu == smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) wake_up_if_idle(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) EXPORT_SYMBOL_GPL(wake_up_all_online_idle_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * smp_call_on_cpu - Call a function on a specific cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Used to call a function on a specific cpu and wait for it to return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * Optionally make sure the call is done on a specified physical cpu via vcpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * pinning in order to support virtualized environments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct smp_call_on_cpu_struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct completion done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) int (*func)(void *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static void smp_call_on_cpu_callback(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct smp_call_on_cpu_struct *sscs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) sscs = container_of(work, struct smp_call_on_cpu_struct, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (sscs->cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) hypervisor_pin_vcpu(sscs->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) sscs->ret = sscs->func(sscs->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (sscs->cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) hypervisor_pin_vcpu(-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) complete(&sscs->done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct smp_call_on_cpu_struct sscs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) .func = func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .data = par,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) .cpu = phys ? cpu : -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (cpu >= nr_cpu_ids || !cpu_online(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) queue_work_on(cpu, system_wq, &sscs.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) wait_for_completion(&sscs.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return sscs.ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) EXPORT_SYMBOL_GPL(smp_call_on_cpu);
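/*
 * Example (editorial illustration, not part of the original file): unlike the
 * IPI based helpers above, smp_call_on_cpu() runs the callback from a
 * workqueue, so it may sleep and can return an error code.  The register
 * accessor below is a hypothetical stand-in.
 */
#if 0
/* Hypothetical accessor for some per-CPU hardware state. */
static u64 example_read_local_register(void)
{
	return 0;
}

static int example_read_reg(void *data)
{
	u64 *val = data;

	/* Runs in process context on the requested CPU; sleeping is allowed. */
	*val = example_read_local_register();
	return 0;
}

static int example_read_reg_on(unsigned int cpu, u64 *val)
{
	/* phys == false: no hypervisor vCPU pinning is requested. */
	return smp_call_on_cpu(cpu, example_read_reg, val, false);
}
#endif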