Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /* 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * smp.h: PowerPC-specific SMP code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Original was a copy of sparc smp.h.  Now heavily modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * for PPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #ifndef _ASM_POWERPC_SMP_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #define _ASM_POWERPC_SMP_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/irqreturn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/paca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
/* Logical id of the CPU the kernel booted on. */
extern int boot_cpuid;
/* Count of secondary CPUs still waiting in the early spin loop. */
extern int spinning_secondaries;
/* Map from logical CPU number to physical (hardware) CPU id. */
extern u32 *cpu_to_phys_id;
/* Set when the firmware/device tree describes CPU core groups. */
extern bool coregroup_enabled;

/* Return the chip/package id that logical CPU "cpu" belongs to. */
extern int cpu_to_chip_id(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
/*
 * Platform-specific SMP operations, installed by platform setup code
 * into the global "smp_ops" pointer declared later in this header.
 */
struct smp_ops_t {
	/* Deliver IPI message "msg" (one of the PPC_MSG_* values) to "cpu". */
	void  (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	/* Trigger the single multiplexed IPI on "cpu". */
	void  (*cause_ipi)(int cpu);
#endif
	/* Raise an NMI-level IPI on "cpu"; return semantics are
	 * platform-defined -- check callers before relying on them. */
	int   (*cause_nmi_ipi)(int cpu);
	/* Detect/initialize the IPI mechanism during boot. */
	void  (*probe)(void);
	/* Release CPU "nr" so it can start executing. */
	int   (*kick_cpu)(int nr);
	/* Hook run before bringing CPU "nr" up. */
	int   (*prepare_cpu)(int nr);
	/* Per-CPU setup once CPU "nr" is running. */
	void  (*setup_cpu)(int nr);
	/* Called once after all CPUs have been brought up. */
	void  (*bringup_done)(void);
	/* Timebase synchronization handshake (consumer side). */
	void  (*take_timebase)(void);
	/* Timebase synchronization handshake (provider side). */
	void  (*give_timebase)(void);
	/* Hotplug: take the calling CPU offline. */
	int   (*cpu_disable)(void);
	/* Hotplug: finish tearing down CPU "nr" from another CPU. */
	void  (*cpu_die)(unsigned int nr);
	/* Return nonzero if CPU "nr" may be onlined. */
	int   (*cpu_bootable)(unsigned int nr);
#ifdef CONFIG_HOTPLUG_CPU
	/* Final self-offline step, runs on the dying CPU itself. */
	void  (*cpu_offline_self)(void);
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
/* Send an NMI IPI to "cpu" (or NMI_IPI_ALL_OTHERS) and run "fn" on it,
 * waiting up to "delay_us" microseconds.  The "safe" variant presumably
 * avoids a more forceful fallback -- confirm against smp.c. */
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

/* Per-CPU cached copy of the processor version register. */
DECLARE_PER_CPU(unsigned int, cpu_pvr);

#ifdef CONFIG_HOTPLUG_CPU
/* Generic hotplug helpers for platforms with no special requirements. */
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
int is_cpu_dead(unsigned int cpu);
#else
/* Without hotplug, marking a CPU "up" is a no-op. */
#define generic_set_cpu_up(i)	do { } while (0)
#endif

#ifdef CONFIG_PPC64
/* On 64-bit both ids live in the per-CPU paca structure. */
#define raw_smp_processor_id()	(local_paca->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
#else
/* 32-bit */
extern int smp_hw_index[];

/*
 * This is particularly ugly: it appears we can't actually get the definition
 * of task_struct here, but we need access to the CPU this task is running on.
 * Instead of using task_struct we're using _TASK_CPU which is extracted from
 * asm-offsets.h by kbuild to get the current processor ID.
 *
 * This also needs to be safeguarded when building asm-offsets.s because at
 * that time _TASK_CPU is not defined yet. It could have been guarded by
 * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
 * when building something else than asm-offsets.s
 */
#ifdef GENERATING_ASM_OFFSETS
#define raw_smp_processor_id()		(0)
#else
#define raw_smp_processor_id()		(*(unsigned int *)((void *)current + _TASK_CPU))
#endif
#define hard_smp_processor_id() 	(smp_hw_index[smp_processor_id()])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) static inline int get_hard_smp_processor_id(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	return smp_hw_index[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static inline void set_hard_smp_processor_id(int cpu, int phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	smp_hw_index[cpu] = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
/* Per-CPU topology masks maintained by the SMP bring-up code. */
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);	/* HW threads of the same core */
DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);	/* CPUs sharing an L2 cache */
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);		/* CPUs of the same core/package */
DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);	/* threads of the same small core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static inline struct cpumask *cpu_sibling_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	return per_cpu(cpu_sibling_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static inline struct cpumask *cpu_core_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	return per_cpu(cpu_core_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static inline struct cpumask *cpu_l2_cache_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	return per_cpu(cpu_l2_cache_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) static inline struct cpumask *cpu_smallcore_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	return per_cpu(cpu_smallcore_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
/* Return the core id for logical CPU "cpu". */
extern int cpu_to_core_id(int cpu);

/* Set when cores expose a "big core"/"small core" thread split --
 * see where it is assigned in smp.c for the exact condition. */
extern bool has_big_cores;

/* Advertise that this architecture supplies its own cpu_smt_mask(). */
#define cpu_smt_mask cpu_smt_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #ifdef CONFIG_SCHED_SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static inline const struct cpumask *cpu_smt_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (has_big_cores)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		return per_cpu(cpu_smallcore_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	return per_cpu(cpu_sibling_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #endif /* CONFIG_SCHED_SMT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *
 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 * in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION	0
#define PPC_MSG_RESCHEDULE	1
#define PPC_MSG_TICK_BROADCAST	2
#define PPC_MSG_NMI_IPI		3

/* This is only used by the powernv kernel */
#define PPC_MSG_RM_HOST_ACTION	4

/* Special "cpu" argument for smp_send_nmi_ipi(): all CPUs but the caller. */
#define NMI_IPI_ALL_OTHERS		-2

#ifdef CONFIG_NMI_IPI
extern int smp_handle_nmi_ipi(struct pt_regs *regs);
#else
/* Without NMI IPI support there is never an NMI IPI to handle. */
static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
#endif

/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];

/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern void smp_muxed_ipi_set_message(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
extern irqreturn_t smp_ipi_demux_relaxed(void);

/* Platform-specific SMP initialization entry points. */
void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_setup_cpu_maps(void);

/* Arch hooks invoked by the generic CPU hotplug core. */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
#else
/* for UP */
/* Uniprocessor: CPU 0 is the only CPU, and there is nothing to inhibit. */
#define hard_smp_processor_id()		get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
/* UP: a CPU's only sibling is itself. */
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
	const struct cpumask *only_self = cpumask_of(cpu);

	return only_self;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 
/* UP: the small-core mask likewise contains only the CPU itself. */
static inline const struct cpumask *cpu_smallcore_mask(int cpu)
{
	const struct cpumask *only_self = cpumask_of(cpu);

	return only_self;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) static inline int get_hard_smp_processor_id(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	return paca_ptrs[cpu]->hw_cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
/* 64-bit: record "phys" as the hardware id in CPU "cpu"'s paca. */
static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	paca_ptrs[cpu]->hw_cpu_id = phys;
}
#else
/* 32-bit */
#ifndef CONFIG_SMP
/* 32-bit UP: only the boot CPU exists, so a single variable suffices
 * (the "cpu" argument is deliberately ignored). */
extern int boot_cpuid_phys;
static inline int get_hard_smp_processor_id(int cpu)
{
	return boot_cpuid_phys;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	boot_cpuid_phys = phys;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) #endif /* !CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) #endif /* !CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 
#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
/* Release secondary CPUs held in the early 64-bit spin loop. */
extern void smp_release_cpus(void);
#else
/* No early spin loop on this configuration: nothing to release.
 * (Dropped the stray ';' after the body -- it formed an empty
 * file-scope declaration, which strict ISO C rejects.) */
static inline void smp_release_cpus(void) { }
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
/* SMT mode requested at boot -- presumably via a command-line/firmware
 * option; confirm against the setup code that assigns it. */
extern int smt_enabled_at_boot;

/* MPIC-based IPI probing and per-CPU interrupt controller setup. */
extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
/* Generic implementations usable as smp_ops_t callbacks. */
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);


/* Also declared under CONFIG_SMP above; repeated here so the
 * prototypes are visible on non-SMP builds as well. */
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

/* Platform SMP operations, installed during platform setup. */
extern struct smp_ops_t *smp_ops;

/* Arch hooks used by the generic smp_call_function machinery. */
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/* Definitions relative to the secondary CPU spin loop
 * and entry point. Not all of them exist on both 32 and
 * 64-bit but defining them all here doesn't harm
 */
extern void generic_secondary_smp_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
extern unsigned int booting_thread_hwid;

/* Early secondary entry point (e.g. assembly startup code). */
extern void __early_start(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) #endif /* __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SMP_H */