// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * Fixes
 *      Felix Koop      :       NR_CPUS used properly
 *      Jose Renau      :       Handle single CPU case.
 *      Alan Cox        :       By repeated request 8) - Total BogoMIPS report.
 *      Greg Wright     :       Fix for kernel stacks panic.
 *      Erich Boleyn    :       MP v1.4 and additional changes.
 *      Matthias Sattler        :       Changes for 2.1 kernel map.
 *      Michel Lespinasse       :       Changes for 2.1 kernel map.
 *      Michael Chastain        :       Change trampoline.S to gnu as.
 *      Alan Cox        :       Dumb bug: 'B' step PPro's are fine
 *      Ingo Molnar     :       Added APIC timers, based on code
 *                      :       from Jose Renau
 *      Ingo Molnar     :       various cleanups and rewrites
 *      Tigran Aivazian :       fixed "0.00 in /proc/uptime on SMP" bug.
 *      Maciej W. Rozycki       :       Bits for genuine 82489DX APICs
 *      Andi Kleen      :       Changed for SMP boot into long mode.
 *      Martin J. Bligh :       Added support for multi-quad systems
 *      Dave Jones      :       Report invalid combinations of Athlon CPUs.
 *      Rusty Russell   :       Hacked into shape for new "hotplug" boot process.
 *      Andi Kleen      :       Converted to new state machine.
 *      Ashok Raj       :       CPU hotplug support
 *      Glauber Costa   :       i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* representing HT, core, and die siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);

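/* representing CPUs whose last level cache (LLC) is shared */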
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static unsigned int logical_die __read_mostly;

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
        int retval = x86_topology_update;

        x86_topology_update = false;
        return retval;
}

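/*
 * Set up the BIOS warm-reset vector so that an AP coming out of reset in
 * real mode jumps to our trampoline. Per the legacy PC convention (as
 * commonly documented): CMOS register 0x0F is the shutdown status byte,
 * writing 0xA there makes the BIOS skip its self-test and jump through
 * the far pointer at 40:67, whose segment and offset words live at
 * TRAMPOLINE_PHYS_HIGH and TRAMPOLINE_PHYS_LOW respectively.
 */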
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
        unsigned long flags;

        spin_lock_irqsave(&rtc_lock, flags);
        CMOS_WRITE(0xa, 0xf);
        spin_unlock_irqrestore(&rtc_lock, flags);
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
                start_eip >> 4;
        *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
                start_eip & 0xf;
}

static inline void smpboot_restore_warm_reset_vector(void)
{
        unsigned long flags;

        /*
         * Paranoid: Set warm reset code and vector here back
         * to default values.
         */
        spin_lock_irqsave(&rtc_lock, flags);
        CMOS_WRITE(0, 0xf);
        spin_unlock_irqrestore(&rtc_lock, flags);

        *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

static void init_freq_invariance(bool secondary);

/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
        int cpuid;

        /*
         * If woken up by an INIT in an 82489DX configuration,
         * cpu_callout_mask guarantees we don't get here before
         * an INIT_deassert IPI reaches our local APIC, so it is
         * now safe to touch our local APIC.
         */
        cpuid = smp_processor_id();

        /*
         * the boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */
        apic_ap_setup();

        /*
         * Save our processor parameters. Note: this information
         * is needed for clock calibration.
         */
        smp_store_cpu_info(cpuid);

        /*
         * The topology information must be up to date before
         * calibrate_delay() and notify_cpu_starting().
         */
        set_cpu_sibling_map(raw_smp_processor_id());

        init_freq_invariance(true);

        /*
         * Get our bogomips.
         * Update loops_per_jiffy in cpu_data. Previous call to
         * smp_store_cpu_info() stored a value that is close but not as
         * accurate as the value just calculated.
         */
        calibrate_delay();
        cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
        pr_debug("Stack at about %p\n", &cpuid);

        wmb();

        notify_cpu_starting(cpuid);

        /*
         * Allow the master to continue.
         */
        cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
        /*
         * Don't put *anything* except direct CPU state initialization
         * before cpu_init(): SMP booting is fragile enough that we want
         * to limit what is done here to the bare minimum.
         */
        cr4_init();

#ifdef CONFIG_X86_32
        /* switch away from the initial page table */
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
#endif
        cpu_init_exception_handling();
        cpu_init();
        x86_cpuinit.early_percpu_clock_init();
        smp_callin();

        enable_start_cpu0 = 0;

        /* Otherwise gcc will move up smp_processor_id() before cpu_init() */
        barrier();
        /*
         * Check TSC synchronization with the boot CPU:
         */
        check_tsc_sync_target();

        speculative_store_bypass_ht_init();

        /*
         * Lock vector_lock, set CPU online and bring the vector
         * allocator online. Online must be set with vector_lock held
         * to prevent a concurrent irq setup/teardown from seeing a
         * half valid vector space.
         */
        lock_vector_lock();
        set_cpu_online(smp_processor_id(), true);
        lapic_online();
        unlock_vector_lock();
        cpu_set_state_online(smp_processor_id());
        x86_platform.nmi_init();

        /* enable local interrupts */
        local_irq_enable();

        x86_cpuinit.setup_percpu_clockev();

        wmb();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu: CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
        return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
        return smp_num_siblings > 1;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
 * @phys_pkg: The physical package id to map
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);

                if (c->initialized && c->phys_proc_id == phys_pkg)
                        return c->logical_proc_id;
        }
        return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);
/**
 * topology_phys_to_logical_die - Map a physical die id to a logical one
 * @die_id: The physical die id to map
 * @cur_cpu: A CPU in the package containing the die
 *
 * Returns logical die id or -1 if not found
 */
int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
{
        int cpu;
        int proc_id = cpu_data(cur_cpu).phys_proc_id;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_x86 *c = &cpu_data(cpu);

                if (c->initialized && c->cpu_die_id == die_id &&
                    c->phys_proc_id == proc_id)
                        return c->logical_die_id;
        }
        return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_die);

/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg: The physical package id as retrieved via CPUID
 * @cpu: The cpu for which this is updated
 */
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
        int new;

        /* Already available somewhere? */
        new = topology_phys_to_logical_pkg(pkg);
        if (new >= 0)
                goto found;

        new = logical_packages++;
        if (new != pkg) {
                pr_info("CPU %u Converting physical %u to logical package %u\n",
                        cpu, pkg, new);
        }
found:
        cpu_data(cpu).logical_proc_id = new;
        return 0;
}
/**
 * topology_update_die_map - Update the physical to logical die map
 * @die: The die id as retrieved via CPUID
 * @cpu: The cpu for which this is updated
 */
int topology_update_die_map(unsigned int die, unsigned int cpu)
{
        int new;

        /* Already available somewhere? */
        new = topology_phys_to_logical_die(die, cpu);
        if (new >= 0)
                goto found;

        new = logical_die++;
        if (new != die) {
                pr_info("CPU %u Converting physical %u to logical die %u\n",
                        cpu, die, new);
        }
found:
        cpu_data(cpu).logical_die_id = new;
        return 0;
}

void __init smp_store_boot_cpu_info(void)
{
        int id = 0; /* CPU 0 */
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        topology_update_package_map(c->phys_proc_id, id);
        topology_update_die_map(c->cpu_die_id, id);
        c->initialized = true;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        /* Copy boot_cpu_data only on the first bringup */
        if (!c->initialized)
                *c = boot_cpu_data;
        c->cpu_index = id;
        /*
         * During boot time, CPU0 has this setup already. Save the info when
         * bringing up an AP or offlined CPU0.
         */
        identify_secondary_cpu(c);
        c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
        int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

        return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
        int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

        return !WARN_ONCE(!topology_same_node(c, o),
                "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
                "[node: %d != %d]. Ignoring dependency.\n",
                cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)                                        \
do {                                                                    \
        cpumask_set_cpu((c1), mfunc(c2));                               \
        cpumask_set_cpu((c2), mfunc(c1));                               \
} while (0)
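/*
 * link_mask() links two CPUs symmetrically in the given topology mask:
 * e.g. link_mask(topology_sibling_cpumask, 2, 3) sets CPU 2 in CPU 3's
 * sibling mask and vice versa.
 */

/*
 * match_smt() below reports whether two logical CPUs are SMT siblings.
 * With AMD topology extensions (X86_FEATURE_TOPOEXT), two threads match
 * when they share package, die and LLC and have either the same core id
 * or the same compute-unit id (cu_id, where 0xff means "not set");
 * otherwise a plain package/die/core id comparison is used.
 */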

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

                if (c->phys_proc_id == o->phys_proc_id &&
                    c->cpu_die_id == o->cpu_die_id &&
                    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
                        if (c->cpu_core_id == o->cpu_core_id)
                                return topology_sane(c, o, "smt");

                        if ((c->cu_id != 0xff) &&
                            (o->cu_id != 0xff) &&
                            (c->cu_id == o->cu_id))
                                return topology_sane(c, o, "smt");
                }

        } else if (c->phys_proc_id == o->phys_proc_id &&
                   c->cpu_die_id == o->cpu_die_id &&
                   c->cpu_core_id == o->cpu_core_id) {
                return topology_sane(c, o, "smt");
        }

        return false;
}

static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
        if (c->phys_proc_id == o->phys_proc_id &&
            c->cpu_die_id == o->cpu_die_id)
                return true;
        return false;
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node. If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
        if (c->phys_proc_id == o->phys_proc_id)
                return true;
        return false;
}

/*
 * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
 *
 * Any Intel CPU that has multiple nodes per package and does not
 * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
 *
 * When in SNC mode, these CPUs enumerate an LLC that is shared
 * by multiple NUMA nodes. The LLC is shared for off-package data
 * access but private to the NUMA node (half of the package) for
 * on-package access. CPUID (the source of the information about
 * the LLC) can only enumerate the cache as shared or unshared,
 * but not this particular configuration.
 */

static const struct x86_cpu_id intel_cod_cpu[] = {
        X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),       /* COD */
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),     /* COD */
        X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),             /* SNC */
        {}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
        const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
        int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
        bool intel_snc = id && id->driver_data;

        /* Do not match if we do not have a valid APICID for cpu: */
        if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
                return false;

        /* Do not match if LLC id does not match: */
        if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
                return false;

        /*
         * Allow the SNC topology without warning. Return of false
         * means 'c' does not share the LLC of 'o'. This will be
         * reflected to userspace.
         */
        if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
                return false;

        return topology_sane(c, o, "llc");
}


#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
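/*
 * When ITMT (Intel Turbo Boost Max Technology 3.0) is enabled via
 * sysctl_sched_itmt_enabled, SD_ASYM_PACKING is added to the SMT and MC
 * sched-domain flags below so the scheduler packs work onto the
 * highest-priority (fastest) cores first.
 */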
static inline int x86_sched_itmt_flags(void)
{
        return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
        return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
        return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
        { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
        { NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
        { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};
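/*
 * Two alternative topology tables: x86_topology is the default, while
 * x86_numa_in_package_topology omits the DIE level for systems where a
 * package contains multiple NUMA nodes (x86_has_numa_in_package below),
 * since the NUMA domains then describe everything above MC.
 */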

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;

void set_cpu_sibling_map(int cpu)
{
        bool has_smt = smp_num_siblings > 1;
        bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct cpuinfo_x86 *o;
        int i, threads;

        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

        if (!has_mp) {
                cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
                cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
                cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
                cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
                c->booted_cores = 1;
                return;
        }

        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);

                if (match_pkg(c, o) && !topology_same_node(c, o))
                        x86_has_numa_in_package = true;

                if ((i == cpu) || (has_smt && match_smt(c, o)))
                        link_mask(topology_sibling_cpumask, cpu, i);

                if ((i == cpu) || (has_mp && match_llc(c, o)))
                        link_mask(cpu_llc_shared_mask, cpu, i);

                if ((i == cpu) || (has_mp && match_die(c, o)))
                        link_mask(topology_die_cpumask, cpu, i);
        }

        threads = cpumask_weight(topology_sibling_cpumask(cpu));
        if (threads > __max_smt_threads)
                __max_smt_threads = threads;

        /*
         * This needs a separate iteration over the cpus because we rely on all
         * topology_sibling_cpumask links to be set-up.
         */
        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);

                if ((i == cpu) || (has_mp && match_pkg(c, o))) {
                        link_mask(topology_core_cpumask, cpu, i);

                        /*
                         * Does this new CPU bring up a new core?
                         */
                        if (threads == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (cpumask_first(
                                    topology_sibling_cpumask(i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
        int cpu;
        unsigned long bogosum = 0;
        /*
         * Allow the user to impress friends.
         */
        pr_debug("Before bogomips\n");
        for_each_possible_cpu(cpu)
                if (cpumask_test_cpu(cpu, cpu_callout_mask))
                        bogosum += cpu_data(cpu).loops_per_jiffy;
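        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so (ignoring integer
         * truncation) bogosum / (500000 / HZ) below is the integer part
         * and (bogosum / (5000 / HZ)) % 100 the two decimal places.
         */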
        pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
                num_online_cpus(),
                bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        pr_debug("Before bogocount - setting activated=1\n");
}

void __inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        const char * const names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        u32 status;

        pr_info("Inquiring remote APIC 0x%x...\n", apicid);

        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                pr_info("... APIC 0x%x %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        pr_cont("a previous APIC delivery may have failed\n");

                apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        pr_cont("%08x\n", status);
                        break;
                default:
                        pr_cont("failed\n");
                }
        }
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to override this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
        get_option(&str, &init_udelay);

        return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);
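/*
 * For example, booting with "cpu_init_udelay=10000" restores the MP-spec
 * 10ms INIT delay on a quirked processor, while "cpu_init_udelay=0"
 * removes the delay entirely.
 */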

static void __init smp_quirk_init_udelay(void)
{
        /* if cmdline changed it from default, leave it alone */
        if (init_udelay != UINT_MAX)
                return;

        /* if modern processor, use no delay */
        if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
            ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
            ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
                init_udelay = 0;
                return;
        }
        /* else, use legacy delay */
        init_udelay = UDELAY_10MS_DEFAULT;
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt;

        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
        apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        /*
         * Give the other CPU some time to accept the IPI.
         */
        udelay(200);
        if (APIC_INTEGRATED(boot_cpu_apic_version)) {
                maxlvt = lapic_get_maxlvt();
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
        }
        pr_debug("NMI sent\n");

        if (send_status)
                pr_err("APIC never delivered???\n");
        if (accept_status)
                pr_err("APIC delivery error (%lx)\n", accept_status);

        return (send_status | accept_status);
}

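/*
 * Wake an AP with the classic INIT-SIPI-SIPI dance: assert INIT, wait,
 * deassert INIT, then (on integrated APICs only) send up to two STARTUP
 * IPIs. Per the usual protocol description, the STARTUP vector field
 * carries the 4K page number of the real-mode entry point, i.e.
 * start_eip >> 12.
 */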
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) unsigned long send_status = 0, accept_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) int maxlvt, num_starts, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) maxlvt = lapic_get_maxlvt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * Be paranoid about clearing APIC errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (APIC_INTEGRATED(boot_cpu_apic_version)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) pr_debug("Asserting INIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
	/*
	 * Assert INIT on the target chip with a level-triggered IPI.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) phys_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) pr_debug("Waiting for send to finish...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) send_status = safe_apic_wait_icr_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
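	/*
	 * The MP spec calls for a ~10ms delay between asserting and
	 * deasserting INIT; smp_quirk_init_udelay() reduces init_udelay
	 * to zero on modern CPUs that don't need the wait.
	 */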
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) udelay(init_udelay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) pr_debug("Deasserting INIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
	/* Deassert INIT on the target chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) pr_debug("Waiting for send to finish...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) send_status = safe_apic_wait_icr_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version: if we don't have an
	 * integrated APIC, don't send the STARTUP IPIs.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (APIC_INTEGRATED(boot_cpu_apic_version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) num_starts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) num_starts = 0;
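	/*
	 * With an external (non-integrated) 82489DX APIC, the AP is
	 * started through INIT plus the warm-reset vector rather than
	 * STARTUP IPIs.
	 */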
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Run STARTUP IPI loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pr_debug("#startup loops: %d\n", num_starts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) for (j = 1; j <= num_starts; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pr_debug("Sending STARTUP #%d\n", j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) pr_debug("After apic_write\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
		/*
		 * Send the STARTUP IPI. Its vector field carries the page
		 * number (start_eip >> 12) of the real-mode trampoline,
		 * which is why start_eip must be 4K-aligned and below 1MB.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) phys_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * Give the other CPU some time to accept the IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (init_udelay == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) udelay(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pr_debug("Startup point 1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) pr_debug("Waiting for send to finish...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) send_status = safe_apic_wait_icr_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * Give the other CPU some time to accept the IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (init_udelay == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) udelay(200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) accept_status = (apic_read(APIC_ESR) & 0xEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (send_status || accept_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pr_debug("After Startup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (send_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) pr_err("APIC never delivered???\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (accept_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pr_err("APIC delivery error (%lx)\n", accept_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return (send_status | accept_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* reduce the number of lines printed when booting a large cpu count system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static void announce_cpu(int cpu, int apicid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int current_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int node = early_cpu_to_node(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static int width, node_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (!width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (!node_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (cpu == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) printk(KERN_INFO "x86: Booting SMP configuration:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (system_state < SYSTEM_RUNNING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (node != current_node) {
			if (current_node >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) current_node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) printk(KERN_INFO ".... node %*s#%d, CPUs: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) node_width - num_digits(node), " ", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Add padding for the BSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (cpu == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) pr_cont("%*s", width + 1, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) pr_info("Booting Node %d Processor %d APIC 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) node, cpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
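/*
 * NMI handler used when waking a soft-offlined CPU0: the NMI itself just
 * kicks CPU0 out of the hlt/mwait loop in play_dead(); cond_wakeup_cpu0()
 * then restarts it via start_cpu0().
 */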
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return NMI_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return NMI_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * Wake up AP by INIT, INIT, STARTUP sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) *
 * After the INITs, the BSP would execute the BIOS boot-strap code instead
 * of waiting for STARTUP, which is not the desired behavior when waking
 * it up. To avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up a soft-offlined CPU0 only. If CPU0 is hard
 * offlined (i.e. physically hot removed and then hot added), the NMI
 * won't wake it up. We'll change this code in the future to wake up a
 * hard-offlined CPU0 once a real platform and a request for it are
 * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int *cpu0_nmi_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int boot_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Wake up AP by INIT, INIT, STARTUP sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
	/*
	 * Wake up the BSP by NMI.
	 *
	 * Register an NMI handler to help wake up CPU0.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) boot_error = register_nmi_handler(NMI_LOCAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) wakeup_cpu0_nmi, 0, "wake_cpu0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (!boot_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) enable_start_cpu0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *cpu0_nmi_registered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (apic->dest_logical == APIC_DEST_LOGICAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) id = cpu0_logical_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) id = apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return boot_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int common_cpu_up(unsigned int cpu, struct task_struct *idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* Just in case we booted with a single CPU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) alternatives_enable_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) per_cpu(current_task, cpu) = idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) cpu_init_stack_canary(cpu, idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* Initialize the interrupt stack(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = irq_init_percpu_irqstack(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /* Stack for startup_32 can be just as for start_secondary onwards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) initial_gs = per_cpu_offset(cpu);
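	/* head_64.S loads initial_gs into MSR_GS_BASE so the AP finds its per-CPU area. */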
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) /*
 * NOTE - on most systems, this is a PHYSICAL APIC ID, but on multiquad
 * systems (i.e. clustered APIC addressing mode) it is a LOGICAL APIC ID.
 * Returns zero if the CPU booted OK, else the error code from
 * ->wakeup_secondary_cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) int *cpu0_nmi_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* start_ip had better be page-aligned! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) unsigned long start_ip = real_mode_header->trampoline_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) unsigned long boot_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
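	/*
	 * The assignments below seed globals that the AP's early boot
	 * assembly (trampoline and head code) consumes before any C
	 * environment exists.
	 */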
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) idle->thread.sp = (unsigned long)task_pt_regs(idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) initial_code = (unsigned long)start_secondary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) initial_stack = idle->thread.sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* Enable the espfix hack for this CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) init_espfix_ap(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* So we see what's up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) announce_cpu(cpu, apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * This grunge runs the startup process for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * the targeted processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (x86_platform.legacy.warm_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) pr_debug("Setting warm reset code and vector.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) smpboot_setup_warm_reset_vector(start_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * Be paranoid about clearing APIC errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (APIC_INTEGRATED(boot_cpu_apic_version)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) apic_write(APIC_ESR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) apic_read(APIC_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
	/*
	 * The AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if a previous attempt to online
	 * it timed out. Clear cpu_initialized_mask so that the AP
	 * starts with a clean state after INIT/SIPI.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) cpumask_clear_cpu(cpu, cpu_initialized_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
	/*
	 * Wake up the CPU, handling two different cases:
	 * - Use the method in the APIC driver if it's defined.
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs, or an NMI for the BSP.
	 */
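	/*
	 * Hedged sketch (not from this file): an APIC driver can install
	 * its own wakeup method, e.g.
	 *
	 *	static struct apic apic_foo __ro_after_init = {
	 *		...
	 *		.wakeup_secondary_cpu	= foo_wakeup_secondary_cpu,
	 *	};
	 *
	 * in which case the INIT/NMI path below is bypassed entirely.
	 */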
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (apic->wakeup_secondary_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) cpu0_nmi_registered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (!boot_error) {
		/*
		 * Wait 10s total for the first sign of life from the AP.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) boot_error = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) timeout = jiffies + 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) while (time_before(jiffies, timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * Tell AP to proceed with initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) cpumask_set_cpu(cpu, cpu_callout_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) boot_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!boot_error) {
		/*
		 * Wait till the AP completes its initial initialization.
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming
			 * online) to complete in the stop_machine() context.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (x86_platform.legacy.warm_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * Cleanup possible dangling ends...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) smpboot_restore_warm_reset_vector();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return boot_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int apicid = apic->cpu_present_to_apicid(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) int cpu0_nmi_registered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int err, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) lockdep_assert_irqs_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
	pr_debug("CPU UP %u\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (apicid == BAD_APICID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) !physid_isset(apicid, phys_cpu_present_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) !apic->apic_id_valid(apicid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) pr_err("%s: bad cpu %d\n", __func__, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * Already booted CPU?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) pr_debug("do_boot_cpu %d Already started\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * Save current MTRR state in case it was changed since early boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) mtrr_save_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /* x86 CPUs take themselves offline, so delayed offline is OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) err = cpu_check_up_prepare(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (err && err != -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* the FPU context is blank, nobody can own it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) err = common_cpu_up(cpu, tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto unreg_nmi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * Check TSC synchronization with the AP (keep irqs disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * while doing so):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) check_tsc_sync_source(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
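	/*
	 * Wait for the AP to mark itself online in start_secondary();
	 * poke the NMI watchdog so this busy wait isn't flagged as a
	 * lockup.
	 */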
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) while (!cpu_online(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) unreg_nmi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * Clean up the nmi handler. Do this after the callin and callout sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * to avoid impact of possible long unregister time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (cpu0_nmi_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * arch_disable_smp_support() - disables SMP support for x86 at runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) void arch_disable_smp_support(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) disable_ioapic_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * Fall back to non SMP mode after errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * RED-PEN audit/test this more. I bet there is more state messed up here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static __init void disable_smp(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) pr_info("SMP disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) disable_ioapic_support();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) init_cpu_present(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) init_cpu_possible(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (smp_found_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) physid_set_mask_of_physid(0, &phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) cpumask_set_cpu(0, topology_sibling_cpumask(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) cpumask_set_cpu(0, topology_core_cpumask(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) cpumask_set_cpu(0, topology_die_cpumask(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * Various sanity checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static void __init smp_sanity_check(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (def_to_bigsmp && nr_cpu_ids > 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) unsigned nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) pr_warn("More than 8 CPUs detected - skipping them\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) "Use CONFIG_X86_BIGSMP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (nr >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) set_cpu_present(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (nr >= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) set_cpu_possible(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) nr_cpu_ids = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) hard_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) physid_set(hard_smp_processor_id(), phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * Should not be necessary because the MP table should list the boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * CPU too, but we do it for the sake of robustness anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) boot_cpu_physical_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) physid_set(hard_smp_processor_id(), phys_cpu_present_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void __init smp_cpu_index_default(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct cpuinfo_x86 *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) c = &cpu_data(i);
		/* Mark the index invalid until the CPU actually boots. */
		c->cpu_index = nr_cpu_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static void __init smp_get_logical_apicid(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (x2apic_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) cpu0_logical_apicid = apic_read(APIC_LDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            kept for common interface support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) void __init native_smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) smp_cpu_index_default();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * Setup boot CPU information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) smp_store_boot_cpu_info(); /* Final full version of the data */
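	/* Only the boot CPU has called in at this point. */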
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) cpumask_copy(cpu_callin_mask, cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
	/*
	 * Set the 'default' x86 topology. This matches default_topology()
	 * in that it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() is run.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) set_sched_topology(x86_topology);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) set_cpu_sibling_map(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) init_freq_invariance(false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) smp_sanity_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) switch (apic_intr_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case APIC_PIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) case APIC_VIRTUAL_WIRE_NO_CONFIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) disable_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) case APIC_SYMMETRIC_IO_NO_ROUTING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) disable_smp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* Setup local timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) x86_init.timers.setup_percpu_clockev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) case APIC_VIRTUAL_WIRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case APIC_SYMMETRIC_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /* Setup local timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) x86_init.timers.setup_percpu_clockev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) smp_get_logical_apicid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pr_info("CPU0: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) print_cpu_info(&cpu_data(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) uv_system_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) set_mtrr_aps_delayed_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) smp_quirk_init_udelay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) speculative_store_bypass_ht_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) void arch_thaw_secondary_cpus_begin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) set_mtrr_aps_delayed_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) void arch_thaw_secondary_cpus_end(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) mtrr_aps_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Early setup to make printk work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void __init native_smp_prepare_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) int me = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) switch_to_new_gdt(me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* already set me in cpu_online_mask in boot_cpu_init() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) cpumask_set_cpu(me, cpu_callout_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) cpu_set_state_online(me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) native_pv_lock_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) void __init calculate_max_logical_packages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int ncpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
	/*
	 * Today neither Intel nor AMD support heterogeneous systems, so
	 * extrapolate the boot CPU's data to all packages.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) pr_info("Max logical packages: %u\n", __max_logical_packages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) void __init native_smp_cpus_done(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) pr_debug("Boot done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) calculate_max_logical_packages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (x86_has_numa_in_package)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) set_sched_topology(x86_numa_in_package_topology);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) nmi_selftest();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) impress_friends();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) mtrr_aps_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int __initdata setup_possible_cpus = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static int __init _setup_possible_cpus(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) get_option(&str, &setup_possible_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) early_param("possible_cpus", _setup_possible_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
/*
 * cpu_possible_mask should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they do not expect
 * to grow or shrink dynamically on CPU arrival/departure.
 * cpu_present_mask, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with possible_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) __init void prefill_possible_map(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) int i, possible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /* No boot processor was found in mptable or ACPI MADT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!num_processors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (boot_cpu_has(X86_FEATURE_APIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) int apicid = boot_cpu_physical_apicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) int cpu = hard_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /* Make sure boot cpu is enumerated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) apic->apic_id_valid(apicid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) generic_processor_info(apicid, boot_cpu_apic_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!num_processors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) num_processors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
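	/* setup_max_cpus == 0 (maxcpus=0): keep at least the boot CPU possible. */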
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) i = setup_max_cpus ?: 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (setup_possible_cpus == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) possible = num_processors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (setup_max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) possible += disabled_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (possible > i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) possible = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) possible = setup_possible_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) total_cpus = max_t(int, possible, num_processors + disabled_cpus);
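	/*
	 * Worked example with hypothetical numbers: 4 enabled plus 4
	 * ACPI-disabled CPUs, CONFIG_HOTPLUG_CPU=y and maxcpus unset,
	 * yield possible = 8 and total_cpus = 8; possible may still be
	 * capped below by nr_cpu_ids or max_cpus.
	 */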
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* nr_cpu_ids could be reduced via nr_cpus= */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (possible > nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) possible, nr_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) possible = nr_cpu_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (!setup_max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (possible > i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) pr_warn("%d Processors exceeds max_cpus limit of %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) possible, setup_max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) possible = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) nr_cpu_ids = possible;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) possible, max_t(int, possible - num_processors, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) reset_cpu_possible_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) for (i = 0; i < possible; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) set_cpu_possible(i, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
/* Recompute the SMT state for all CPUs when one goes offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static void recompute_smt_state(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int max_threads, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) max_threads = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) for_each_online_cpu (cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int threads = cpumask_weight(topology_sibling_cpumask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (threads > max_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) max_threads = threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) __max_smt_threads = max_threads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static void remove_siblinginfo(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int sibling;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) struct cpuinfo_x86 *c = &cpu_data(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) for_each_cpu(sibling, topology_core_cpumask(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * The last thread sibling in this CPU core is going down.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) cpu_data(sibling).booted_cores--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) for_each_cpu(sibling, topology_die_cpumask(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) for_each_cpu(sibling, topology_sibling_cpumask(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cpumask_clear(cpu_llc_shared_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) cpumask_clear(topology_sibling_cpumask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) cpumask_clear(topology_core_cpumask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) cpumask_clear(topology_die_cpumask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) c->cpu_core_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) c->booted_cores = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) recompute_smt_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static void remove_cpu_from_maps(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) set_cpu_online(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) cpumask_clear_cpu(cpu, cpu_callout_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) cpumask_clear_cpu(cpu, cpu_callin_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /* was set by cpu_init() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) cpumask_clear_cpu(cpu, cpu_initialized_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) numa_remove_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) void cpu_disable_common(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) remove_siblinginfo(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /* It's now safe to remove this processor from the online map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) lock_vector_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) remove_cpu_from_maps(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) unlock_vector_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) fixup_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) lapic_offline();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) int native_cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ret = lapic_can_unplug_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) cpu_disable_common();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * Disable the local APIC. Otherwise IPI broadcasts will reach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * it. It still responds normally to INIT, NMI, SMI, and SIPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * Disabling the APIC must happen after cpu_disable_common()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * which invokes fixup_irqs().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * Disabling the APIC preserves already set bits in IRR, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * an interrupt arriving after disabling the local APIC does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * set the corresponding IRR bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * fixup_irqs() scans IRR for set bits so it can raise a not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * yet handled interrupt on the new destination CPU via an IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * but obviously it can't do so for IRR bits which are not set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * IOW, interrupts arriving after disabling the local APIC will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * be lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) apic_soft_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int common_cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /* We don't do anything here: the idle task is faking death itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /* They ack this in play_dead() by setting CPU_DEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (cpu_wait_death(cpu, 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (system_state == SYSTEM_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) pr_info("CPU %u is now offline\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) pr_err("CPU %u didn't die...\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) void native_cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) common_cpu_die(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) void play_dead_common(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) idle_task_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /* Ack it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) (void)cpu_report_death();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * With physical CPU hotplug, we should halt the CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * cond_wakeup_cpu0 - Wake up CPU0 if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * If an NMI wants to wake up CPU0, start CPU0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) void cond_wakeup_cpu0(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (smp_processor_id() == 0 && enable_start_cpu0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) start_cpu0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * We need to flush the caches before going to sleep, lest we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * dirty data in our caches when we come back up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static inline void mwait_play_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) unsigned int eax, ebx, ecx, edx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) unsigned int highest_cstate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) unsigned int highest_subcstate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) void *mwait_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (!this_cpu_has(X86_FEATURE_MWAIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (!this_cpu_has(X86_FEATURE_CLFLUSH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) eax = CPUID_MWAIT_LEAF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ecx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) native_cpuid(&eax, &ebx, &ecx, &edx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * eax will be 0 if the EDX enumeration is not valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * It is initialized below to the (cstate, sub_cstate) value when EDX is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) eax = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) edx >>= MWAIT_SUBSTATE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (edx & MWAIT_SUBSTATE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) highest_cstate = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) (highest_subcstate - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
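/*
 * Illustrative sketch of the packing above, with made-up CPUID values
 * (and assuming MWAIT_SUBSTATE_SIZE is 4): a hypothetical CPUID.05H
 * EDX of 0x220 enumerates two C1 and two C2 sub-states. After the
 * initial shift discards the C0 field, the loop sees 0x22 and ends
 * with highest_cstate = 1, highest_subcstate = 2, so the MWAIT hint
 * becomes (1 << 4) | (2 - 1) = 0x11, i.e. the deepest enumerated C2
 * sub-state.
 */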
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * This should be a memory location in a cache line which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * unlikely to be touched by other processors. The actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * content is immaterial as it is not actually modified in any way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) mwait_ptr = &current_thread_info()->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) wbinvd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * The CLFLUSH is a workaround for erratum AAI65 for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * the Xeon 7400 series. It's not clear it is actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * needed, but it should be harmless in either case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * The WBINVD is insufficient due to the spurious-wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * case where we return around the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) clflush(mwait_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) __monitor(mwait_ptr, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) __mwait(eax, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) cond_wakeup_cpu0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) void hlt_play_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (__this_cpu_read(cpu_info.x86) >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) wbinvd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) native_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) cond_wakeup_cpu0();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) void native_play_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) play_dead_common();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) tboot_shutdown(TB_SHUTDOWN_WFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) mwait_play_dead(); /* Only returns on failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (cpuidle_play_dead())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) hlt_play_dead();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) #else /* ... !CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) int native_cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) void native_cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* We said "no" in __cpu_disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) void native_play_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * APERF/MPERF frequency ratio computation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * The scheduler wants to do frequency invariant accounting and needs a <1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * ratio to account for the 'current' frequency, corresponding to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * freq_curr / freq_max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * Since the frequency freq_curr on x86 is controlled by a micro-controller and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * our P-state setting is little more than a request/hint, we need to observe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * the effective frequency 'BusyMHz', i.e. the average frequency over a time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * interval after discarding idle time. This is given by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * BusyMHz = delta_APERF / delta_MPERF * freq_base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * where freq_base is the max non-turbo P-state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) * The freq_max term has to be set to a somewhat arbitrary value, because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * can't know which turbo states will be available at a given point in time:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * it all depends on the thermal headroom of the entire package. We set it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * the turbo level with 4 cores active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * Benchmarks show that's a good compromise between the 1C turbo ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * (freq_curr/freq_max would rarely reach 1) and something close to freq_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * which would ignore the entire turbo range (a conspicuous part, making
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * freq_curr/freq_max always maxed out).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * An exception to the heuristic above is the Atom uarch, where we choose the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * highest turbo level for freq_max since Atoms are generally oriented towards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * power efficiency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * Setting freq_max to anything less than the 1C turbo ratio makes the ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * freq_curr / freq_max eventually grow >1, in which case we clip it to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) */
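/*
 * A worked example with illustrative numbers (not taken from any real
 * part): assume freq_base = 2.0 GHz, a 1C turbo of 3.6 GHz and a 4C
 * turbo of 3.0 GHz. With freq_max set to the 4C turbo, a core whose
 * BusyMHz works out to 1.5 GHz reports freq_curr / freq_max =
 * 1.5 / 3.0 = 0.5 of capacity. Had freq_max been the 1C turbo
 * (3.6 GHz), full capacity would almost never be reported; had it
 * been freq_base (2.0 GHz), the whole turbo range would clip to 1.
 */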
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static DEFINE_PER_CPU(u64, arch_prev_aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) static DEFINE_PER_CPU(u64, arch_prev_mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) void arch_set_max_freq_ratio(bool turbo_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) arch_turbo_freq_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static bool turbo_disabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) u64 misc_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) *base_freq = (*base_freq >> 16) & 0x3F; /* max P state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) *turbo_freq = *turbo_freq & 0x3F; /* 1C turbo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
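/*
 * E.g., with hypothetical MSR contents: MSR_ATOM_CORE_RATIOS =
 * 0x00180000 carries 0x18 (ratio 24) in bits 21:16 and
 * MSR_ATOM_CORE_TURBO_RATIOS = 0x1e carries ratio 30 in bits 5:0,
 * so this returns *base_freq = 24 and *turbo_freq = 30.
 */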
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) #include <asm/cpu_device_id.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) #include <asm/intel-family.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) #define X86_MATCH(model) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) X86_MATCH(XEON_PHI_KNL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) X86_MATCH(XEON_PHI_KNM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) X86_MATCH(SKYLAKE_X),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) X86_MATCH(ATOM_GOLDMONT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) X86_MATCH(ATOM_GOLDMONT_D),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) X86_MATCH(ATOM_GOLDMONT_PLUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) int num_delta_fratio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) int fratio, delta_fratio, found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) fratio = (msr >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) i = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (found >= num_delta_fratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) *turbo_freq = fratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) delta_fratio = (msr >> (i + 5)) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (delta_fratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) found += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) fratio -= delta_fratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) i += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) } while (i < 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
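/*
 * Sketch of the walk above with hypothetical register values: if
 * MSR_TURBO_RATIO_LIMIT has bits 15:8 = 32 (the 1-core ratio) and the
 * first delta field (bits 23:21) holds 2, the first pass sets
 * found = 1 and fratio = 32 - 2 = 30; with num_delta_fratio == 1 the
 * next pass returns *turbo_freq = 30.
 */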
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) u64 ratios, counts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) u32 group_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) for (i = 0; i < 64; i += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) group_size = (counts >> i) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (group_size >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) *turbo_freq = (ratios >> i) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
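/*
 * Illustrative example (made-up MSR contents): with size == 4,
 * counts == 0x0402 (group sizes 2, then 4) and ratios == 0x1c20, the
 * scan skips the 2-core group at i = 0 and matches the 4-core group
 * at i = 8, returning *turbo_freq = (0x1c20 >> 8) & 0xFF = 0x1c
 * (ratio 28).
 */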
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) u64 msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) *base_freq = (*base_freq >> 8) & 0xFF; /* max P state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) *turbo_freq = (msr >> 24) & 0xFF; /* 4C turbo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* The CPU may have less than 4 cores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (!*turbo_freq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) *turbo_freq = msr & 0xFF; /* 1C turbo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static bool intel_set_max_freq_ratio(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) u64 base_freq, turbo_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) u64 turbo_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * Some hypervisors advertise X86_FEATURE_APERFMPERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * but then fill all MSRs with zeroes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * Some CPUs have turbo boost but don't declare any turbo ratio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * in MSR_TURBO_RATIO_LIMIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (!base_freq || !turbo_freq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (!turbo_ratio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) arch_turbo_freq_ratio = turbo_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) arch_set_max_freq_ratio(turbo_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
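/*
 * For instance (hypothetical ratios, in the usual 100 MHz bus-clock
 * units): base_freq = 24 (2.4 GHz) and turbo_freq = 30 (3.0 GHz) give
 * arch_turbo_freq_ratio = 30 * 1024 / 24 = 1280, i.e. 1.25 *
 * SCHED_CAPACITY_SCALE.
 */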
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static void init_counter_refs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) u64 aperf, mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) rdmsrl(MSR_IA32_APERF, aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) rdmsrl(MSR_IA32_MPERF, mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) this_cpu_write(arch_prev_aperf, aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) this_cpu_write(arch_prev_mperf, mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) static void init_freq_invariance(bool secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (secondary) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (static_branch_likely(&arch_scale_freq_key)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) init_counter_refs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ret = intel_set_max_freq_ratio();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) init_counter_refs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) static_branch_enable(&arch_scale_freq_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) static void disable_freq_invariance_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static_branch_disable(&arch_scale_freq_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static DECLARE_WORK(disable_freq_invariance_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) disable_freq_invariance_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) void arch_scale_freq_tick(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) u64 freq_scale = SCHED_CAPACITY_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) u64 aperf, mperf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) u64 acnt, mcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (!arch_scale_freq_invariant())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) rdmsrl(MSR_IA32_APERF, aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) rdmsrl(MSR_IA32_MPERF, mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) acnt = aperf - this_cpu_read(arch_prev_aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) mcnt = mperf - this_cpu_read(arch_prev_mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) this_cpu_write(arch_prev_aperf, aperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) this_cpu_write(arch_prev_mperf, mperf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) freq_scale = div64_u64(acnt, mcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (!freq_scale)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (freq_scale > SCHED_CAPACITY_SCALE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) freq_scale = SCHED_CAPACITY_SCALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) this_cpu_write(arch_freq_scale, freq_scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) schedule_work(&disable_freq_invariance_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
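/*
 * Worked tick example (illustrative counter deltas, assuming
 * SCHED_CAPACITY_SHIFT is 10): with arch_max_freq_ratio = 1280 as in
 * the sketch above, acnt = 1500000 and mcnt = 2000000 give
 * freq_scale = (1500000 << 20) / (2000000 * 1280) = 614, i.e. the CPU
 * ran at roughly 60% of its defined maximum frequency over the tick.
 */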
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) static inline void init_freq_invariance(bool secondary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) #endif /* CONFIG_X86_64 */