Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_X86_SMP_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_X86_SMP_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #ifndef __ASSEMBLY__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <asm/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <asm/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) extern int smp_num_siblings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) extern unsigned int num_processors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) /* cpus sharing the last level cache: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) static inline struct cpumask *cpu_llc_shared_mask(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	return per_cpu(cpu_llc_shared_map, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) struct task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
/*
 * Backend hooks for SMP bring-up, shutdown and IPIs.  A single global
 * instance (smp_ops, declared below) is dispatched through by the inline
 * wrappers in this header; paravirt environments install their own hooks
 * ("Globals due to paravirt" below).
 */
struct smp_ops {
	/* Boot-time bring-up, in call order. */
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* Stopping CPUs: @wait selects whether the caller waits (see wrappers). */
	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	/* CPU hotplug: online (@tidle is the idle task), offline, teardown. */
	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	/* Cross-CPU function-call IPIs: many targets vs. a single target. */
	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) /* Globals due to paravirt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) extern void set_cpu_sibling_map(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) extern struct smp_ops smp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) static inline void smp_send_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	smp_ops.stop_other_cpus(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) static inline void stop_other_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	smp_ops.stop_other_cpus(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) static inline void smp_prepare_boot_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	smp_ops.smp_prepare_boot_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) static inline void smp_prepare_cpus(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	smp_ops.smp_prepare_cpus(max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) static inline void smp_cpus_done(unsigned int max_cpus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	smp_ops.smp_cpus_done(max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	return smp_ops.cpu_up(cpu, tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) static inline int __cpu_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	return smp_ops.cpu_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) static inline void __cpu_die(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	smp_ops.cpu_die(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static inline void play_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	smp_ops.play_dead();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) static inline void smp_send_reschedule(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	smp_ops.smp_send_reschedule(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) static inline void arch_send_call_function_single_ipi(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	smp_ops.send_call_func_single_ipi(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	smp_ops.send_call_func_ipi(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) void cpu_disable_common(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) void native_smp_prepare_boot_cpu(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) void native_smp_prepare_cpus(unsigned int max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) void calculate_max_logical_packages(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) void native_smp_cpus_done(unsigned int max_cpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) int native_cpu_disable(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int common_cpu_die(unsigned int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) void native_cpu_die(unsigned int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) void hlt_play_dead(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) void native_play_dead(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) void play_dead_common(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) void wbinvd_on_cpu(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) int wbinvd_on_all_cpus(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) void cond_wakeup_cpu0(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) void native_smp_send_reschedule(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) void native_send_call_func_ipi(const struct cpumask *mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) void native_send_call_func_single_ipi(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) void smp_store_boot_cpu_info(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) void smp_store_cpu_info(int id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) asmlinkage __visible void smp_reboot_interrupt(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) __visible void smp_reschedule_interrupt(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) __visible void smp_call_function_interrupt(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) __visible void smp_call_function_single_interrupt(struct pt_regs *r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  * This function is needed by all SMP systems. It must _always_ be valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * from the initial startup. We map APIC_BASE very early in page_setup(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  * so this is correct in the x86 case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define raw_smp_processor_id()  this_cpu_read(cpu_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define __smp_processor_id() __this_cpu_read(cpu_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #ifdef CONFIG_X86_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) extern int safe_smp_processor_id(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) # define safe_smp_processor_id()	smp_processor_id()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #else /* !CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define wbinvd_on_cpu(cpu)     wbinvd()
/* !CONFIG_SMP: only one CPU exists, so a local cache flush covers them all. */
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) extern unsigned disabled_cpus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #ifdef CONFIG_X86_LOCAL_APIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) extern int hard_smp_processor_id(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #else /* CONFIG_X86_LOCAL_APIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #define hard_smp_processor_id()	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #endif /* CONFIG_X86_LOCAL_APIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #ifdef CONFIG_DEBUG_NMI_SELFTEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) extern void nmi_selftest(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define nmi_selftest() do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) #endif /* __ASSEMBLY__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) #endif /* _ASM_X86_SMP_H */