// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_MAX
};

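/*
 * Map from logical CPU id to hardware thread (hart) id.  Entries default to
 * INVALID_HARTID until the corresponding hart is discovered at boot.
 */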
unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

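/* The boot hart always becomes logical CPU 0. */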
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/*
 * Per-CPU IPI state: a bitmask of pending single-bit IPI messages plus
 * per-type delivery statistics, each kept on its own cache line.
 */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

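/*
 * Reverse lookup: linear scan of the hartid map.  Returns the logical CPU id
 * for @hartid, or -ENOENT if the hart is unknown.
 */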
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return -ENOENT;
}

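/*
 * Translate a mask of logical CPU ids into the corresponding mask of hart
 * ids, e.g. for firmware interfaces that address harts rather than CPUs.
 */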
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	cpumask_clear(out);
	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);

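/* Match a logical CPU against a physical id (hart id) when parsing CPU nodes. */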
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

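/* Park this CPU: mark it offline and spin in wait_for_interrupt() forever. */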
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

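/*
 * The mechanism used to deliver IPIs is platform specific and is registered
 * at boot by whichever backend is available (for example an SBI firmware
 * driver).  A registration might look roughly like the sketch below; the
 * structure instance and callback names are illustrative, not taken from
 * this file:
 *
 *	static struct riscv_ipi_ops sbi_ipi_ops = {
 *		.ipi_inject = sbi_send_cpumask_ipi,
 *		.ipi_clear  = sbi_clear_ipi,
 *	};
 *
 *	riscv_set_ipi_ops(&sbi_ipi_ops);
 */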
static struct riscv_ipi_ops *ipi_ops;

void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

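/*
 * Acknowledge a pending IPI: let the backend clear its own source first,
 * then clear the local software-interrupt pending bit.
 */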
void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);

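/*
 * Sender side: set the message bit in each destination's pending word, make
 * sure the store is visible before the interrupt, then ask the backend to
 * raise a software interrupt on the destination CPUs.
 */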
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}

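/* Self-IPI used by the irq_work core to run queued work in IRQ context. */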
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

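/*
 * Receiver side: called from the software-interrupt handler.  Atomically
 * grab and clear the pending word, dispatch every message type found in it,
 * and loop until no new bits have been set in the meantime.
 */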
void handle_IPI(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	irq_enter();

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			goto done;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}

done:
	irq_exit();
	set_irq_regs(old_regs);
}

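/* Labels printed by show_ipi_stats(), indexed by ipi_message_type. */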
static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
};

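/* Print one row per IPI type with a per-CPU delivery count. */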
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

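/* Hooks used by the generic smp_call_function_*() machinery. */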
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

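/*
 * Stop all other CPUs, e.g. on panic or reboot: ask each online secondary to
 * park itself via IPI_CPU_STOP, then poll for up to one second for them to
 * drop out of the online mask.
 */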
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

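/* Kick @cpu so its scheduler re-evaluates the runqueue. */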
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);