// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
        smt_idx,
#endif
        cache_idx,
        mc_idx,
        die_idx,
};

#define MAX_THREAD_LIST_SIZE    8
#define THREAD_GROUP_SHARE_L1   1
struct thread_groups {
        unsigned int property;
        unsigned int nr_groups;
        unsigned int threads_per_group;
        unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1-cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
        if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot
                    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}
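/*
 * Illustrative note (added by the editor, not in the original source): with
 * "smt=2" on the boot command line, smt_enabled_at_boot == 2, so on a core
 * with four hardware threads only threads 0 and 1 are treated as bootable
 * here; threads 2 and 3 are left for later explicit onlining.
 */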


#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
        if (nr < 0 || nr >= nr_cpu_ids)
                return -EINVAL;

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        if (!paca_ptrs[nr]->cpu_start) {
                paca_ptrs[nr]->cpu_start = 1;
                smp_mb();
                return 0;
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Ok it's not there, so it might be soft-unplugged, let's
         * try to bring it back
         */
        generic_set_cpu_up(nr);
        smp_wmb();
        smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

        return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
        generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
        scheduler_ipi();
        return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
        timer_broadcast_interrupt();
        return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
        smp_handle_nmi_ipi(get_irq_regs());
        return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
        [PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
        [PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
        int err;

        if (msg < 0 || msg > PPC_MSG_NMI_IPI)
                return -EINVAL;
#ifndef CONFIG_NMI_IPI
        if (msg == PPC_MSG_NMI_IPI)
                return 1;
#endif

        err = request_irq(virq, smp_ipi_action[msg],
                          IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
                          smp_ipi_name[msg], NULL);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
             virq, smp_ipi_name[msg], err);

        return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        long messages;                  /* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        /*
         * Order previous accesses before accesses in the IPI handler.
         */
        smp_mb();
        message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
        smp_muxed_ipi_set_message(cpu, msg);

        /*
         * cause_ipi functions are required to include a full barrier
         * before doing whatever causes the IPI.
         */
        smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
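/*
 * Worked example (added for clarity): smp_muxed_ipi_set_message() above
 * stores each message as one byte of info->messages via message[msg] = 1.
 * On a 64-bit big-endian kernel, byte 0 is the most significant byte of the
 * long, so IPI_MESSAGE(0) == 1UL << 56 and IPI_MESSAGE(1) == 1UL << 48; on
 * little-endian, IPI_MESSAGE(0) == 1UL << 0 and IPI_MESSAGE(1) == 1UL << 8.
 * Either way, IPI_MESSAGE(msg) tests exactly the byte the sender set.
 */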

irqreturn_t smp_ipi_demux(void)
{
        mb();   /* order any irq clear */

        return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
        struct cpu_messages *info;
        unsigned long all;

        info = this_cpu_ptr(&ipi_message);
        do {
                all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
                /*
                 * Must check for PPC_MSG_RM_HOST_ACTION messages
                 * before PPC_MSG_CALL_FUNCTION messages because when
                 * a VM is destroyed, we call kick_all_cpus_sync()
                 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
                 * messages have completed before we free any VCPUs.
                 */
                if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
                        kvmppc_xics_ipi_action();
#endif
                if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
                        generic_smp_call_function_interrupt();
                if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
                        scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
                if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
                        timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
                if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
                        nmi_ipi_action(0, NULL);
#endif
        } while (info->messages);

        return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
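/*
 * Delivery sketch (added for clarity): a reschedule IPI to CPU n flows
 * roughly as
 *
 *   smp_send_reschedule(n)
 *     -> do_message_pass(n, PPC_MSG_RESCHEDULE)
 *          -> smp_ops->message_pass(n, msg)        (per-message IPI), or
 *          -> smp_muxed_ipi_message_pass(n, msg)   (single muxed IPI)
 *   ... CPU n takes the interrupt ...
 *     -> reschedule_action() or smp_ipi_demux()
 *          -> scheduler_ipi()
 */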

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
        do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * A new NMI can be issued before all targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
        raw_local_irq_save(*flags);
        hard_irq_disable();
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
                spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
}

static void nmi_ipi_lock(void)
{
        while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
                spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
        smp_mb();
        WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
        atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
        nmi_ipi_unlock();
        raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
        void (*fn)(struct pt_regs *) = NULL;
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 0;

        /*
         * Unexpected NMIs are possible here because the interrupt may not
         * be able to distinguish NMI IPIs from other types of NMIs, or
         * because the caller may have timed out.
         */
        nmi_ipi_lock_start(&flags);
        if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
                fn = READ_ONCE(nmi_ipi_function);
                WARN_ON_ONCE(!fn);
                ret = 1;
        }
        nmi_ipi_unlock_end(&flags);

        if (fn)
                fn(regs);

        return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
        if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
                return;

        if (cpu >= 0) {
                do_message_pass(cpu, PPC_MSG_NMI_IPI);
        } else {
                int c;

                for_each_online_cpu(c) {
                        if (c == raw_smp_processor_id())
                                continue;
                        do_message_pass(c, PPC_MSG_NMI_IPI);
                }
        }
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
                              u64 delay_us, bool safe)
{
        unsigned long flags;
        int me = raw_smp_processor_id();
        int ret = 1;

        BUG_ON(cpu == me);
        BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

        if (unlikely(!smp_ops))
                return 0;

        nmi_ipi_lock_start(&flags);
        while (nmi_ipi_busy) {
                nmi_ipi_unlock_end(&flags);
                spin_until_cond(!nmi_ipi_busy);
                nmi_ipi_lock_start(&flags);
        }
        nmi_ipi_busy = true;
        nmi_ipi_function = fn;

        WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

        if (cpu < 0) {
                /* ALL_OTHERS */
                cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
                cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
        } else {
                cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
        }

        nmi_ipi_unlock();

        /* Interrupts remain hard disabled */

        do_smp_send_nmi_ipi(cpu, safe);

        nmi_ipi_lock();
        /* nmi_ipi_busy is set here, so unlock/lock is okay */
        while (!cpumask_empty(&nmi_ipi_pending_mask)) {
                nmi_ipi_unlock();
                udelay(1);
                nmi_ipi_lock();
                if (delay_us) {
                        delay_us--;
                        if (!delay_us)
                                break;
                }
        }

        if (!cpumask_empty(&nmi_ipi_pending_mask)) {
                /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
                ret = 0;
                cpumask_clear(&nmi_ipi_pending_mask);
        }

        nmi_ipi_function = NULL;
        nmi_ipi_busy = false;

        nmi_ipi_unlock_end(&flags);

        return ret;
}
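/*
 * Usage sketch (added for clarity): delay_us is a timeout, in microseconds,
 * for the targets to enter their handler; 0 means wait indefinitely. For
 * example, smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, fn, 1000000) gives the other
 * CPUs roughly one second to respond and returns 1 if they all did, 0
 * otherwise.
 */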

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
        return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
        return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
        debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        int cpu;

        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
        if (kdump_in_progress() && crash_wake_offline) {
                for_each_present_cpu(cpu) {
                        if (cpu_online(cpu))
                                continue;
                        /*
                         * crash_ipi_callback will wait for
                         * all cpus, including offline CPUs.
                         * We don't care about nmi_ipi_function.
                         * Offline cpus will jump straight into
                         * crash_ipi_callback, we can skip the
                         * entire NMI dance and waiting for
                         * cpus to clear pending mask, etc.
                         */
                        do_smp_send_nmi_ipi(cpu, false);
                }
        }
}
#endif

#ifdef CONFIG_NMI_IPI
static void crash_stop_this_cpu(struct pt_regs *regs)
#else
static void crash_stop_this_cpu(void *dummy)
#endif
{
        /*
         * Just busy wait here and avoid marking CPU as offline to ensure
         * register data is captured appropriately.
         */
        while (1)
                cpu_relax();
}

void crash_smp_send_stop(void)
{
        static bool stopped = false;

        /*
         * In case of fadump, register data for all CPUs is captured by f/w
         * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
         * this rtas call to avoid tricky post processing of those CPUs'
         * backtraces.
         */
        if (should_fadump_crash())
                return;

        if (stopped)
                return;

        stopped = true;

#ifdef CONFIG_NMI_IPI
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
#else
        smp_call_function(crash_stop_this_cpu, NULL, 0);
#endif /* CONFIG_NMI_IPI */
}

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
        /*
         * IRQs are already hard disabled by smp_handle_nmi_ipi().
         */
        set_cpu_online(smp_processor_id(), false);

        spin_begin();
        while (1)
                spin_cpu_relax();
}

void smp_send_stop(void)
{
        smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
        hard_irq_disable();

        /*
         * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
         * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
         * to know other CPUs are offline before it breaks locks to flush
         * printk buffers, in case we panic()ed while holding the lock.
         */
        set_cpu_online(smp_processor_id(), false);

        spin_begin();
        while (1)
                spin_cpu_relax();
}

void smp_send_stop(void)
{
        static bool stopped = false;

        /*
         * Prevent waiting on csd lock from a previous smp_send_stop.
         * This is racy, but in general callers try to do the right
         * thing and only fire off one smp_send_stop (e.g., see
         * kernel/panic.c)
         */
        if (stopped)
                return;

        stopped = true;

        smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around the cpumask we pass around a function
 * that returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
        cpumask_set_cpu(i, get_cpumask(j));
        cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
                struct cpumask *(*get_cpumask)(int))
{
        cpumask_clear_cpu(i, get_cpumask(j));
        cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * Extends set_cpus_related. Instead of setting one CPU at a time in
 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
 */
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
                                struct cpumask *(*dstmask)(int))
{
        struct cpumask *mask;
        int k;

        mask = srcmask(j);
        for_each_cpu(k, srcmask(i))
                cpumask_or(dstmask(k), dstmask(k), mask);

        if (i == j)
                return;

        mask = srcmask(i);
        for_each_cpu(k, srcmask(j))
                cpumask_or(dstmask(k), dstmask(k), mask);
}
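/*
 * Worked example (added for clarity, hypothetical topology): if
 * srcmask(i) = {0,1} and srcmask(j) = {4,5}, then after
 * or_cpumasks_related(i, j, srcmask, dstmask) each of CPUs 0 and 1 has
 * {4,5} OR-ed into its dstmask, and each of CPUs 4 and 5 has {0,1} OR-ed
 * into its dstmask.
 */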

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the ibm,thread-groups[0]
 *                      matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * The ibm,thread-groups[0..N-1] array defines which group of threads in
 * the CPU-device node can be grouped together based on the property.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation cache.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the grouping.
 *
 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12], this
 * implies that there are 2 groups of 4 threads each, where each group
 * of threads shares the L1 and translation cache.
 *
 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 * and the "ibm,ppc-interrupt-server#s" of the second group is
 * {9, 10, 11, 12}.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if the property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static int parse_thread_groups(struct device_node *dn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct thread_groups *tg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned int property)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) u32 *thread_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) size_t total_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ret = of_property_read_u32_array(dn, "ibm,thread-groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) thread_group_array, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) tg->property = thread_group_array[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) tg->nr_groups = thread_group_array[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) tg->threads_per_group = thread_group_array[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (tg->property != property ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) tg->nr_groups < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) tg->threads_per_group < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) total_threads = tg->nr_groups * tg->threads_per_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ret = of_property_read_u32_array(dn, "ibm,thread-groups",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) thread_group_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 3 + total_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) thread_list = &thread_group_array[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) for (i = 0 ; i < total_threads; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) tg->thread_list[i] = thread_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
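
/*
 * Illustrative walkthrough (not used by the code): with the example
 * property [1,2,4,5,6,7,8,9,10,11,12] from the comment above, and
 * assuming THREAD_GROUP_SHARE_L1 == 1, parse_thread_groups() would
 * fill in:
 *
 *	tg->property          = 1
 *	tg->nr_groups         = 2
 *	tg->threads_per_group = 4
 *	tg->thread_list[]     = {5, 6, 7, 8, 9, 10, 11, 12}
 */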
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
 * get_cpu_thread_group_start : Searches tg->thread_list for the thread
 *                              group that @cpu belongs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * @cpu : The logical CPU whose thread group is being searched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * @tg : The thread-group structure of the CPU node which @cpu belongs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *
 * Returns the index into tg->thread_list that points to the start
 * of the thread group that @cpu belongs to.
 *
 * Returns -1 if @cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int hw_cpu_id = get_hard_smp_processor_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) for (i = 0; i < tg->nr_groups; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int group_start = i * tg->threads_per_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) for (j = 0; j < tg->threads_per_group; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int idx = group_start + j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (tg->thread_list[idx] == hw_cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
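
/*
 * Continuing the hypothetical example above: with
 * thread_list = {5,6,7,8,9,10,11,12} and threads_per_group = 4, a CPU
 * whose hard SMP id is 10 falls in the second group, so
 * get_cpu_thread_group_start() returns index 4.
 */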
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
static int init_cpu_l1_cache_map(int cpu)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct device_node *dn = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct thread_groups tg = {.property = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) .nr_groups = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) .threads_per_group = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int first_thread = cpu_first_thread_sibling(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int i, cpu_group_start = -1, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!dn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (unlikely(cpu_group_start == -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) err = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) for (i = first_thread; i < first_thread + threads_per_core; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) int i_group_start = get_cpu_thread_group_start(i, &tg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (unlikely(i_group_start == -1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) err = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (i_group_start == cpu_group_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) of_node_put(dn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static bool shared_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) #ifdef CONFIG_SCHED_SMT
/* Sched-domain flags for the SMT level; adds SD_ASYM_PACKING when needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) static int powerpc_smt_flags(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) flags |= SD_ASYM_PACKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * This topology makes it *much* cheaper to migrate tasks between adjacent cores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * since the migrated task remains cache hot. We want to take advantage of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * at the scheduler level so an extra topology level is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static int powerpc_shared_cache_flags(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return SD_SHARE_PKG_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /*
 * We can't just pass cpu_l2_cache_mask() directly because it
 * returns a non-const pointer and the compiler barfs on that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static const struct cpumask *shared_cache_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return per_cpu(cpu_l2_cache_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) #ifdef CONFIG_SCHED_SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static const struct cpumask *smallcore_smt_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return cpu_smallcore_mask(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static struct cpumask *cpu_coregroup_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return per_cpu(cpu_coregroup_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static bool has_coregroup_support(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return coregroup_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static const struct cpumask *cpu_mc_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return cpu_coregroup_mask(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
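/*
 * Note: fixup_topology() below indexes powerpc_topology[] with the
 * smt/cache/mc/die level constants, so the order of the entries here
 * must stay in sync with those indices. The MC and DIE entries specify
 * no sd_flags.
 */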
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static struct sched_domain_topology_level powerpc_topology[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #ifdef CONFIG_SCHED_SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) { cpu_mc_mask, SD_INIT_NAME(MC) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) { cpu_cpu_mask, SD_INIT_NAME(DIE) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) { NULL, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
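/*
 * init_big_cores() parses the per-CPU L1 thread-group information. If any
 * possible CPU lacks a usable "ibm,thread-groups" entry, it returns early
 * with an error and has_big_cores stays false; the caller in
 * smp_prepare_cpus() only checks has_big_cores, not the return value.
 */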
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static int __init init_big_cores(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int err = init_cpu_l1_cache_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) has_big_cores = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) void __init smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) DBG("smp_prepare_cpus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun up any cpus yet, but let's be paranoid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) BUG_ON(boot_cpuid != smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Fixup boot cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) smp_store_cpu_info(boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) cpu_callin_map[boot_cpuid] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (has_coregroup_support())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) GFP_KERNEL, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * numa_node_id() works after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (cpu_present(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) set_cpu_numa_mem(cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) local_memory_node(numa_cpu_lookup_table[cpu]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Init the cpumasks so the boot CPU is related to itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (has_coregroup_support())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) init_big_cores();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (has_big_cores) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) cpumask_set_cpu(boot_cpuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) cpu_smallcore_mask(boot_cpuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (smp_ops && smp_ops->probe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) smp_ops->probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) void smp_prepare_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) BUG_ON(smp_processor_id() != boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) paca_ptrs[boot_cpuid]->__current = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) current_set[boot_cpuid] = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int generic_cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) unsigned int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (cpu == boot_cpuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) set_cpu_online(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) vdso_data->processorCount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* Update affinity of all IRQs previously aimed at this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) irq_migrate_all_off_this_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * Depending on the details of the interrupt controller, it's possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * that one of the interrupts we just migrated away from this CPU is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * actually already pending on this CPU. If we leave it in that state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * the interrupt will never be EOI'ed, and will never fire again. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * temporarily enable interrupts here, to allow any pending interrupt to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * be received (and EOI'ed), before we take this CPU offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void generic_cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (is_cpu_dead(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) printk(KERN_ERR "CPU%d didn't die...\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) void generic_set_cpu_dead(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) per_cpu(cpu_state, cpu) = CPU_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(); otherwise
 * cpu_state stays CPU_DEAD after generic_set_cpu_dead() has been called,
 * and the wait loop in generic_cpu_die() returns immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) void generic_set_cpu_up(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int generic_check_cpu_restart(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) int is_cpu_dead(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return per_cpu(cpu_state, cpu) == CPU_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static bool secondaries_inhibited(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return kvm_hv_mode_active();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #else /* HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #define secondaries_inhibited() 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) paca_ptrs[cpu]->__current = idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) THREAD_SIZE - STACK_FRAME_OVERHEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) idle->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) secondary_current = current_set[cpu] = idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int __cpu_up(unsigned int cpu, struct task_struct *tidle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int rc, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * Don't allow secondary threads to come online if inhibited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (threads_per_core > 1 && secondaries_inhibited() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) cpu_thread_in_subcore(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (smp_ops == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) cpu_idle_thread_init(cpu, tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * The platform might need to allocate resources prior to bringing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * up the CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (smp_ops->prepare_cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) rc = smp_ops->prepare_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
	/* Make sure the callin-map entry is 0 (it can be left over from a
	 * previous CPU hotplug).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) cpu_callin_map[cpu] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* The information for processor bringup must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * be written out to main store before we release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * the processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* wake up cpus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) DBG("smp: kicking cpu %d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rc = smp_ops->kick_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
	 * Wait to see if the cpu made a callin (i.e. is actually up).
	 * Use this value that I found through experimentation.
	 * -- Cort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
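	/*
	 * Both loops below bound the wait to roughly five seconds:
	 * 50000 * udelay(100) at boot, or 5000 * msleep(1) for hotplug.
	 */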
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (system_state < SYSTEM_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) for (c = 50000; c && !cpu_callin_map[cpu]; c--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * CPUs can take much longer to come up in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * hotplug case. Wait five seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) for (c = 5000; c && !cpu_callin_map[cpu]; c--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!cpu_callin_map[cpu]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) printk(KERN_ERR "Processor %u is stuck.\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) DBG("Processor %u found.\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (smp_ops->give_timebase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) smp_ops->give_timebase();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Wait until cpu puts itself in the online & active maps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) spin_until_cond(cpu_online(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /* Return the value of the reg property corresponding to the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * logical cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) int cpu_to_core_id(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) const __be32 *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) np = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (!np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) reg = of_get_property(np, "reg", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) id = be32_to_cpup(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) EXPORT_SYMBOL_GPL(cpu_to_core_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Helper routines for cpu to core mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int cpu_core_index_of_thread(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return cpu >> threads_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int cpu_first_thread_of_core(int core)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) return core << threads_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /* Must be called when no change can occur to cpu_present_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * i.e. during cpu online or offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static struct device_node *cpu_to_l2cache(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct device_node *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (!cpu_present(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) np = of_get_cpu_node(cpu, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (np == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) cache = of_find_next_cache_node(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
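/*
 * update_mask_by_l2() populates cpu_l2_cache_mask for @cpu. Returns false
 * when no l2-cache node (or no scratch cpumask) is available, in which
 * case only the core siblings are assumed to share the cache; returns
 * true when the mask was built from the device-tree l2-cache information.
 */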
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct device_node *l2_cache, *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (has_big_cores)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) submask_fn = cpu_smallcore_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) l2_cache = cpu_to_l2cache(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!l2_cache || !*mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* Assume only core siblings share cache with this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) for_each_cpu(i, submask_fn(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) set_cpus_related(cpu, i, cpu_l2_cache_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Update l2-cache mask with all the CPUs that are part of submask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /* Skip all CPUs already part of current CPU l2-cache mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) for_each_cpu(i, *mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) np = cpu_to_l2cache(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* Skip all CPUs already part of current CPU l2-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (np == l2_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) cpumask_andnot(*mask, *mask, submask_fn(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) of_node_put(l2_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static void remove_cpu_from_masks(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (shared_caches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) mask_fn = cpu_l2_cache_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) for_each_cpu(i, mask_fn(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) set_cpus_unrelated(cpu, i, cpu_sibling_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (has_big_cores)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) for_each_cpu(i, cpu_core_mask(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) set_cpus_unrelated(cpu, i, cpu_core_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (has_coregroup_support()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) for_each_cpu(i, cpu_coregroup_mask(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static inline void add_cpu_to_smallcore_masks(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!has_big_cores)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (cpu_online(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) set_cpus_related(i, cpu, cpu_smallcore_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) int coregroup_id = cpu_to_coregroup_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (shared_caches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) submask_fn = cpu_l2_cache_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!*mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* Assume only siblings are part of this CPU's coregroup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) for_each_cpu(i, submask_fn(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) set_cpus_related(cpu, i, cpu_coregroup_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* Update coregroup mask with all the CPUs that are part of submask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* Skip all CPUs already part of coregroup mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) for_each_cpu(i, *mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* Skip all CPUs not part of this coregroup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (coregroup_id == cpu_to_coregroup_id(i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cpumask_andnot(*mask, *mask, submask_fn(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
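/*
 * add_cpu_to_masks() updates the topology cpumasks (thread sibling,
 * smallcore, l2-cache, coregroup and core) as @cpu is brought online.
 */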
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static void add_cpu_to_masks(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int first_thread = cpu_first_thread_sibling(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int chip_id = cpu_to_chip_id(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) cpumask_var_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) cpumask_set_cpu(cpu, cpu_core_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) for (i = first_thread; i < first_thread + threads_per_core; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (cpu_online(i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) set_cpus_related(i, cpu, cpu_sibling_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) add_cpu_to_smallcore_masks(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* In CPU-hotplug path, hence use GFP_ATOMIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) update_mask_by_l2(cpu, &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (has_coregroup_support())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) update_coregroup_mask(cpu, &mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (shared_caches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) submask_fn = cpu_l2_cache_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Update core_mask with all the CPUs that are part of submask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* Skip all CPUs already part of current CPU core mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (chip_id == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) cpumask_and(mask, mask, cpu_cpu_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) for_each_cpu(i, mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (chip_id == cpu_to_chip_id(i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) cpumask_andnot(mask, mask, submask_fn(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) cpumask_andnot(mask, mask, cpu_core_mask(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) free_cpumask_var(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* Activate a secondary processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) void start_secondary(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) unsigned int cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) mmgrab(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) current->active_mm = &init_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) smp_store_cpu_info(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) set_dec(tb_ticks_per_jiffy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) rcu_cpu_starting(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) cpu_callin_map[cpu] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (smp_ops->setup_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) smp_ops->setup_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (smp_ops->take_timebase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) smp_ops->take_timebase();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) secondary_cpu_time_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (system_state == SYSTEM_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) vdso_data->processorCount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) vdso_getcpu_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) set_numa_node(numa_cpu_lookup_table[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /* Update topology CPU masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) add_cpu_to_masks(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * Check for any shared caches. Note that this must be done on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * per-core basis because one core in the pair might be disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (!shared_caches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct cpumask *mask = cpu_l2_cache_mask(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (has_big_cores)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sibling_mask = cpu_smallcore_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) shared_caches = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) notify_cpu_starting(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) set_cpu_online(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) boot_init_stack_canary();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* We can enable ftrace for secondary cpus now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) this_cpu_enable_ftrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) #ifdef CONFIG_PROFILING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) int setup_profiling_timer(unsigned int multiplier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static void fixup_topology(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) #ifdef CONFIG_SCHED_SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (has_big_cores) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) pr_info("Big cores detected but using small core scheduling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) powerpc_topology[smt_idx].mask = smallcore_smt_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (!has_coregroup_support())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if the masks are different.
	 * - Don't consolidate if sd_flags exist and are different.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) for (i = 1; i <= die_idx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!powerpc_topology[i - 1].sd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) #ifdef CONFIG_SCHED_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) powerpc_topology[i].name = powerpc_topology[i + 1].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) void __init smp_cpus_done(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * We are running pinned to the boot CPU, see rest_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (smp_ops && smp_ops->setup_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) smp_ops->setup_cpu(boot_cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (smp_ops && smp_ops->bringup_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) smp_ops->bringup_done();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) dump_numa_cpu_topology();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) fixup_topology();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) set_sched_topology(powerpc_topology);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) int __cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (!smp_ops->cpu_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) this_cpu_disable_ftrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) err = smp_ops->cpu_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Update sibling maps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) remove_cpu_from_masks(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) void __cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (smp_ops->cpu_die)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) smp_ops->cpu_die(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) void arch_cpu_idle_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /*
	 * Disable ftrace on the down path. It will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) this_cpu_disable_ftrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (smp_ops->cpu_offline_self)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) smp_ops->cpu_offline_self();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* If we return, we re-enter start_secondary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) start_secondary_resume();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) #endif