/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

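/*
 * Per-CPU bring-up hook (descriptive note, not from the original header):
 * implemented outside this file in the arch/parisc code, and expected to be
 * called once on each CPU during boot to set up per-CPU state (e.g. the FP
 * unit) before that CPU starts running normal work; exact call sites depend
 * on this tree.
 */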
extern int init_per_cpu(int cpuid);

#if defined(CONFIG_SMP)

/*
 * Page Zero locations where PDC looks for the address to branch to when
 * we poke slave CPUs that are still spinning in the "Icache loop".
 */
#define PDC_OS_BOOT_RENDEZVOUS		0x10
#define PDC_OS_BOOT_RENDEZVOUS_HI	0x28
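/*
 * Note (not from the original header): these are byte offsets into Page
 * Zero, the low-memory area shared with firmware. The SMP boot path is
 * expected to store the physical address of the secondary-CPU entry point
 * at these offsets so that PDC releases the held CPUs; the code that
 * actually writes them lives outside this header (arch/parisc/kernel/smp.c
 * in a typical tree).
 */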

#ifndef ASSEMBLY
#include <linux/bitops.h>
#include <linux/threads.h>	/* for NR_CPUS */
#include <linux/cpumask.h>
typedef unsigned long address_t;

/*
 * Private routines/data
 *
 * Physical and logical CPU ids are equivalent until we support CPU hotplug.
 */
#define cpu_number_map(cpu)	(cpu)
#define cpu_logical_map(cpu)	(cpu)

extern void smp_send_all_nop(void);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
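/*
 * Usage sketch (illustrative, not part of the original header): these two
 * hooks are what the generic cross-call code ends up invoking, e.g.
 *
 *	smp_call_function_single(cpu, fn, info, 1);
 *		-> arch_send_call_function_single_ipi(cpu);
 *
 * On parisc they are implemented outside this header by sending an IPI to
 * the target CPU(s); the exact IPI type used depends on smp.c in this tree.
 */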

#endif /* !ASSEMBLY */

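/*
 * Descriptive note (not from the original header): the CPU number is read
 * from the cpu field cached in struct thread_info, so this is a cheap
 * inline access with no hardware query or function call.
 */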
#define raw_smp_processor_id()	(current_thread_info()->cpu)

#else /* CONFIG_SMP */

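/* UP build: there are no other CPUs to poke, so this is a no-op stub. */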
static inline void smp_send_all_nop(void) { return; }

#endif

#define NO_PROC_ID	0xFF		/* No processor magic marker */
#define ANY_PROC_ID	0xFF		/* Any processor magic marker */
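
/*
 * Stubbed CPU-hotplug hooks (descriptive note, not from the original
 * header): real hot-unplug is not supported here, so __cpu_disable()
 * reports success without doing anything and __cpu_die() simply spins
 * forever.
 */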
static inline int __cpu_disable(void)
{
	return 0;
}

static inline void __cpu_die(unsigned int cpu)
{
	while (1)
		;
}

#endif /* __ASM_SMP_H */