Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * SMP boot-related support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *	David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2001, 2004-2005 Intel Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * 	Rohit Seth <rohit.seth@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * 	Suresh Siddha <suresh.b.siddha@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * 	Gordon Jin <gordon.jin@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *	Ashok Raj  <ashok.raj@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *						smp_boot_cpus()/smp_commence() is replaced by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * 04/06/21 Ashok Raj		<ashok.raj@intel.com> Added CPU Hotplug Support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * 04/12/26 Jin Gordon <gordon.jin@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * 04/12/26 Rohit Seth <rohit.seth@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *						Add multi-threading and multi-core detection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  *						Setup cpu_sibling_map and cpu_core_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include <asm/current.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include <asm/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include <asm/mca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #include <asm/sal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
/* Compile-time switch for verbose SMP boot tracing via Dprintk(). */
#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
#ifdef CONFIG_HOTPLUG_CPU
/* Whether removing the boot CPU (BSP) is permitted on this configuration. */
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu
 * info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

/*
 * Point sal_state_for_booting_cpu at the rendezvous slot of the CPU
 * about to be booted.  No trailing semicolon in the expansion: the
 * original macro ended in ';', which produced an extra empty statement
 * at every call site and would break use in an unbraced if/else.
 */
#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)])

#else
#define set_brendez_area(x)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)	/* index SMP_CACHE_BYTES bytes in, i.e. the next cache line */

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
/* Handshake mailboxes shared between the ITC master and slave CPUs. */
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

/* Idle task for the CPU currently being brought up; set in do_boot_cpu(). */
struct task_struct *task_for_booting_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

/* Bitmap of APs that have checked in via smp_callin(); polled by do_boot_cpu(). */
static cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */

/* Set by the "nointroute" boot option. */
char __initdata no_int_routing;

unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */

/*
 * Default for the "force_cpei=" boot option: retarget CPEI only when
 * CONFIG_FORCE_CPEI_RETARGET is configured in.
 */
#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) cmdl_force_cpei(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	int value=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	get_option (&str, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	force_cpei_retarget = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) __setup("force_cpei=", cmdl_force_cpei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) nointroute (char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	no_int_routing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	printk ("no_int_routing on\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) __setup("nointroute", nointroute);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
/*
 * Copy the b0 (return branch register) value from the first AP that comes
 * up into the BSP's SAL rendezvous slot — presumably so a removed-and-
 * re-inserted BSP resumes through the right address (CPU hotplug only).
 * No-op unless CONFIG_HOTPLUG_CPU is enabled.
 */
static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	static int fix_bsp_b0 = 1;
	int cpuid = smp_processor_id();

	/* act exactly once, and never on the BSP (cpu 0) itself */
	if (fix_bsp_b0 && cpuid) {
		sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
		printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
		fix_bsp_b0 = 0;
	}
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
/*
 * Master side of the ITC synchronization handshake; runs on the
 * time-keeper CPU (invoked via smp_call_function_single() from
 * ia64_sync_itc()).  For each of the slave's NUM_ROUNDS*NUM_ITERS
 * exchanges: wait for the slave to raise go[MASTER], consume the
 * request, and publish this CPU's ITC value in go[SLAVE].  Interrupts
 * stay disabled throughout so the slave's timing measurements are not
 * perturbed by interrupt handling on this side.
 */
void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;		/* tells the waiting slave that the master is ready */

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			/* wait for the slave's timestamp request */
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;			/* consume the request... */
			go[SLAVE] = ia64_get_itc();	/* ...and answer with our ITC */
		}
	}
	local_irq_restore(flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 *
 * Performs NUM_ITERS request/response exchanges with sync_master() and
 * keeps the exchange with the smallest round-trip time, since that one
 * bounds the measurement error most tightly.
 *
 * @rt:     out — round-trip time of the best exchange (t1 - t0)
 * @master: out — master's timestamp relative to our t0 (tm - t0)
 *
 * Returns tcenter - best_tm: the offset of our interval midpoint from
 * the master's timestamp (the caller applies the negation as the ITC
 * adjustment).
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;			/* request a timestamp from the master */
		while (!(tm = go[SLAVE]))	/* wait for the master's ITC value */
			cpu_relax();
		go[SLAVE] = 0;			/* acknowledge so the next round starts clean */
		t1 = ia64_get_itc();

		/* keep the exchange with the tightest round trip */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)  * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)  * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)  * unaccounted-for errors (such as getting a machine check in the middle of a calibration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)  * step).  The basic idea is for the slave to ask the master what itc value it has and to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)  * read its own itc before and after the master responds.  Each iteration gives us three
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)  * timestamps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)  *	slave		master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)  *	t0 ---\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  *             ---\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  *		   --->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  *			tm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  *		   /---
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  *	       /---
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  *	t1 <---
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)  * between the slave and the master is symmetric.  Even if the interconnect were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)  * asymmetric, we would still know that the synchronization error is smaller than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)  * roundtrip latency (t0 - t1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)  * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)  * within one or two cycles.  However, we can only *guarantee* that the synchronization is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)  * accurate to within a round-trip time, which is typically in the range of several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)  * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)  * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)  * than half a micro second or so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)  */
/*
 * Slave side of the ITC synchronization (see the algorithm description
 * in the block comment above).  @master is the CPU number of the
 * time-keeper whose ITC we converge to.
 */
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;		/* cleared by sync_master() once it is ready */

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;	/* NOTE(review): assigned but never read below */
			}

			if (!done) {
				/*
				 * After round 0, fold a running latency
				 * estimate (adjust_latency/4) into the
				 * correction — presumably to damp oscillation.
				 */
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 * Intentionally empty; kept as a hook called from smp_callin().
 */
static inline void smp_setup_percpu_timer(void)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
/*
 * Per-AP bring-up, called from start_secondary() on the freshly started
 * CPU: bind to the NUMA node, set up IRQ vectors and the MCA vector,
 * mark the CPU online, synchronize the ITC with the time-keeper when the
 * platform has no drift-free ITC, calibrate the delay loop, and finally
 * check in on cpu_callin_map so do_boot_cpu() on the BSP can proceed.
 */
static void
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	/* an AP that is already marked online indicates corrupted boot state */
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(cpu_to_node_map[cpuid]);
	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));

	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	set_cpu_online(cpuid, true);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(),
		 * which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, &cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
/*
 * Activate a secondary processor.  head.S calls this.
 * Sets up early per-CPU state, checks in via smp_callin(), then enters
 * the idle loop.
 */
int
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	smp_callin();	/* marks this CPU online and releases the BSP */

	/* enter the idle loop; not expected to return */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	task_for_booting_cpu = idle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	set_brendez_area(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	 * Wait 10s total for the AP to start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	Dprintk("Waiting on callin_map ...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	for (timeout = 0; timeout < 100000; timeout++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		if (cpumask_test_cpu(cpu, &cpu_callin_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			break;  /* It has booted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		barrier(); /* Make sure we re-read cpu_callin_map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	Dprintk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		ia64_cpu_to_sapicid[cpu] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		set_cpu_online(cpu, false);  /* was set in smp_callin() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) decay (char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	int ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	get_option (&str, &ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) __setup("decay=", decay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)  * Initialize the logical CPU number to SAPICID mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) smp_build_cpu_map (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	int sapicid, cpu, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	int boot_cpu_id = hard_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		ia64_cpu_to_sapicid[cpu] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	init_cpu_present(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	set_cpu_possible(0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		sapicid = smp_boot_data.cpu_phys_id[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		if (sapicid == boot_cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		set_cpu_present(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		set_cpu_possible(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		ia64_cpu_to_sapicid[cpu] = sapicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 		cpu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)  * Cycle through the APs sending Wakeup IPIs to boot each.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) smp_prepare_cpus (unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	int boot_cpu_id = hard_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	 * Initialize the per-CPU profiling counter/multiplier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	smp_setup_percpu_timer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	cpumask_set_cpu(0, &cpu_callin_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	current_thread_info()->cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	 * If SMP should be disabled, then really disable it!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	if (!max_cpus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 		printk(KERN_INFO "SMP mode deactivated.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 		init_cpu_online(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		init_cpu_present(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 		init_cpu_possible(cpumask_of(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) void smp_prepare_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	set_cpu_online(smp_processor_id(), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	set_numa_node(cpu_to_node_map[smp_processor_id()]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) #ifdef CONFIG_HOTPLUG_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) clear_cpu_sibling_map(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	for_each_cpu(i, &cpu_core_map[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) remove_siblinginfo(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	int last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	if (cpu_data(cpu)->threads_per_core == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	    cpu_data(cpu)->cores_per_socket == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	/* remove it from all sibling map's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 	clear_cpu_sibling_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) extern void fixup_irqs(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) int migrate_platform_irqs(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	int new_cpei_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	struct irq_data *data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	const struct cpumask *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	int 		retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	 * dont permit CPEI target to removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		printk ("CPU (%d) is CPEI Target\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		if (can_cpei_retarget()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 			 * Now re-target the CPEI to a different processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 			new_cpei_cpu = cpumask_any(cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 			mask = cpumask_of(new_cpei_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 			set_cpei_target_cpu(new_cpei_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 			data = irq_get_irq_data(ia64_cpe_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 			 * Switch for now, immediately, we need to do fake intr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 			 * as other interrupts, but need to study CPEI behaviour with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 			 * polling before making changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 			if (data && data->chip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 				data->chip->irq_disable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 				data->chip->irq_set_affinity(data, mask, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 				data->chip->irq_enable(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 		if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 			retval = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /* must be called with cpucontrol mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) int __cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	 * dont permit boot processor for now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	if (cpu == 0 && !bsp_remove_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		printk ("Your platform does not support removal of BSP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		return (-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	set_cpu_online(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	if (migrate_platform_irqs(cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		set_cpu_online(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	remove_siblinginfo(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	fixup_irqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	local_flush_tlb_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	cpumask_clear_cpu(cpu, &cpu_callin_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) void __cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 		/* They ack this in play_dead by setting CPU_DEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 			printk ("CPU %d is now offline\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)  	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) #endif /* CONFIG_HOTPLUG_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) smp_cpus_done (unsigned int dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	unsigned long bogosum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	 * Allow the user to impress friends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	for_each_online_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		bogosum += cpu_data(cpu)->loops_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static inline void set_cpu_sibling_map(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	for_each_online_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 			cpumask_set_cpu(i, &cpu_core_map[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 			cpumask_set_cpu(cpu, &cpu_core_map[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 				cpumask_set_cpu(i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 						&per_cpu(cpu_sibling_map, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 				cpumask_set_cpu(cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 						&per_cpu(cpu_sibling_map, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) __cpu_up(unsigned int cpu, struct task_struct *tidle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	int sapicid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	sapicid = ia64_cpu_to_sapicid[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	if (sapicid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	 * Already booted cpu? not valid anymore since we dont
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	 * do idle loop tightspin anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	if (cpumask_test_cpu(cpu, &cpu_callin_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	/* Processor goes to start_secondary(), sets online flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	ret = do_boot_cpu(sapicid, cpu, tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	if (cpu_data(cpu)->threads_per_core == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	    cpu_data(cpu)->cores_per_socket == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 		cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	set_cpu_sibling_map(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)  * Assume that CPUs have been discovered by some platform-dependent interface.  For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)  * SoftSDV/Lion, that would be ACPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)  * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) init_smp_config(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	struct fptr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		unsigned long fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 		unsigned long gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	} *ap_startup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	long sal_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	/* Tell SAL where to drop the APs.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	ap_startup = (struct fptr *) start_ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	if (sal_ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 		       ia64_sal_strerror(sal_ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)  * identify_siblings(cpu) gets called from identify_cpu. This populates the 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)  * information related to logical execution units in per_cpu_data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) void identify_siblings(struct cpuinfo_ia64 *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	long status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	u16 pltid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	pal_logical_to_physical_t info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	status = ia64_pal_logical_to_phys(-1, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	if (status != PAL_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		if (status != PAL_STATUS_UNIMPLEMENTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 			printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 				"ia64_pal_logical_to_phys failed with %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 		info.overview_ppid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		info.overview_cpp  = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 		info.overview_tpc  = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	status = ia64_sal_physical_id_info(&pltid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	if (status != PAL_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 		if (status != PAL_STATUS_UNIMPLEMENTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 			printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 				"ia64_sal_pltid failed with %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	c->socket_id =  (pltid << 8) | info.overview_ppid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	if (info.overview_cpp == 1 && info.overview_tpc == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	c->cores_per_socket = info.overview_cpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	c->threads_per_core = info.overview_tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	c->num_log = info.overview_num_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	c->core_id = info.log1_cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	c->thread_id = info.log1_tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)  * returns non zero, if multi-threading is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)  * on at least one physical package. Due to hotplug cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)  * and (maxcpus=), all threads may not necessarily be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)  * even though the processor supports multi-threading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) int is_multithreading_enabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	for_each_present_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 		for_each_present_cpu(j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 			if (j == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 					return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) EXPORT_SYMBOL_GPL(is_multithreading_enabled);