/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <asm/sigp.h>
#include <asm/lowcore.h>
#include <asm/processor.h>

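/* The logical CPU number of the running CPU is kept in its lowcore. */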
#define raw_smp_processor_id()	(S390_lowcore.cpu_nr)

extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift;
extern unsigned int smp_cpu_mtid;
extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];

extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void);

extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
extern void smp_save_dump_cpus(void);
extern void smp_yield_cpu(int cpu);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
extern int smp_cpu_get_cpu_address(int cpu);
extern void smp_fill_possible_mask(void);
extern void smp_detect_cpus(void);

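/*
 * Stop the calling CPU: signal SIGP STOP to our own CPU address and
 * spin until the stop takes effect. This function does not return.
 */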
static inline void smp_stop_cpu(void)
{
	u16 pcpu = stap();

	for (;;) {
		__pcpu_sigp(pcpu, SIGP_STOP, 0, NULL);
		cpu_relax();
	}
}

/* Return thread 0 CPU number as base CPU */
static inline int smp_get_base_cpu(int cpu)
{
	return cpu - (cpu % (smp_cpu_mtid + 1));
}

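/* Nothing left to do on s390 once all CPUs have been brought up. */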
static inline void smp_cpus_done(unsigned int max_cpus)
{
}

extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
extern void schedule_mcck_handler(void);

#endif /* __ASM_SMP_H */