/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/cpu_has_feature.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power-of-2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had fewer threads, as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */

#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define has_big_cores		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif
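
/*
 * Illustrative sketch (example only, not part of this header's API):
 * the helpers below rely on the invariant
 * threads_per_core == 1 << threads_shift, with threads_core_mask
 * holding the low threads_per_core bits set, so the thread mask of
 * any core can be derived by shifting that template up to the core's
 * first CPU number.
 */
#if 0	/* example sketch only, not compiled */
static inline cpumask_t example_threads_of_core_mask(int cpu)
{
	cpumask_t mask;

	/* Shift the per-core template up to this CPU's core base. */
	cpumask_shift_left(&mask, &threads_core_mask,
			   cpu & ~(threads_per_core - 1));
	return mask;
}
#endif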

/*
 * cpu_thread_mask_to_cores - Return a cpumask with one CPU set per core
 *                            hit by the argument
 *
 * @threads: a cpumask of online threads
 *
 * This function returns a cpumask which will have one online CPU's
 * bit set for each core that has at least one thread set in the argument.
 *
 * This can typically be used for things like IPIs for TLB invalidations,
 * since those need to be done only once per core/TLB.
 */
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
	cpumask_t tmp, res;
	int i, cpu;

	cpumask_clear(&res);
	for (i = 0; i < NR_CPUS; i += threads_per_core) {
		cpumask_shift_left(&tmp, &threads_core_mask, i);
		if (cpumask_intersects(threads, &tmp)) {
			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpumask_set_cpu(cpu, &res);
		}
	}
	return res;
}
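
/*
 * Usage sketch for the comment above (hypothetical names, example
 * only; assumes <linux/smp.h>): run a TLB-related callback once per
 * online core instead of once per thread.
 */
#if 0	/* example sketch only, not compiled */
static void example_tlb_flush_fn(void *info)
{
	/* hypothetical per-core TLB invalidation work */
}

static void example_flush_once_per_core(void)
{
	cpumask_t cores = cpu_thread_mask_to_cores(cpu_online_mask);

	/*
	 * 'cores' has one online CPU set per core; run the callback
	 * on those CPUs so each core's TLB is handled exactly once.
	 */
	smp_call_function_many(&cores, example_tlb_flush_fn, NULL, 1);
}
#endif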

static inline int cpu_nr_cores(void)
{
	return nr_cpu_ids >> threads_shift;
}

static inline cpumask_t cpu_online_cores_map(void)
{
	return cpu_thread_mask_to_cores(cpu_online_mask);
}

#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

static inline int cpu_thread_in_core(int cpu)
{
	return cpu & (threads_per_core - 1);
}

static inline int cpu_thread_in_subcore(int cpu)
{
	return cpu & (threads_per_subcore - 1);
}

static inline int cpu_first_thread_sibling(int cpu)
{
	return cpu & ~(threads_per_core - 1);
}

static inline int cpu_last_thread_sibling(int cpu)
{
	return cpu | (threads_per_core - 1);
}
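
/*
 * Worked example for the helpers above, assuming threads_per_core == 4
 * (threads_shift == 2): for cpu 11 (binary 1011),
 *
 *   cpu_thread_in_core(11)       == 11 & 3  == 3
 *   cpu_first_thread_sibling(11) == 11 & ~3 == 8
 *   cpu_last_thread_sibling(11)  == 11 | 3  == 11
 *
 * i.e. CPU 11 is thread 3 of the core spanning CPUs 8..11.
 */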

/*
 * tlb_thread_siblings are siblings that share a TLB. This is not
 * architected, is not something a hypervisor could emulate, and a
 * future CPU may change behaviour even in compat mode, so this should
 * only be used on PowerNV, and only with care.
 */
static inline int cpu_first_tlb_thread_sibling(int cpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return cpu & ~0x6;	/* Big Core */
	else
		return cpu_first_thread_sibling(cpu);
}

static inline int cpu_last_tlb_thread_sibling(int cpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return cpu | 0x6;	/* Big Core */
	else
		return cpu_last_thread_sibling(cpu);
}

static inline int cpu_tlb_thread_sibling_step(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return 2;	/* Big Core */
	else
		return 1;
}
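
/*
 * Iteration sketch over TLB thread siblings (example only, using just
 * the three helpers above): on ISA 3.0 big cores with 8 threads per
 * core, the threads sharing a TLB are every second thread ({0,2,4,6}
 * and {1,3,5,7}), which the step helper expresses; elsewhere the step
 * is 1 and this degenerates to walking all threads of the core.
 */
#if 0	/* example sketch only, not compiled */
static void example_for_each_tlb_sibling(int cpu)
{
	int first = cpu_first_tlb_thread_sibling(cpu);
	int last = cpu_last_tlb_thread_sibling(cpu);
	int step = cpu_tlb_thread_sibling_step();
	int sib;

	for (sib = first; sib <= last; sib += step) {
		/* operate on 'sib', e.g. invalidate its cached TLB state */
	}
}
#endif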

/*
 * Read the Thread Enable Status Register on SMT-capable BookE CPUs;
 * on everything else, report a single enabled thread (thread 0).
 */
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}

void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);

#endif /* __ASSEMBLY__ */

#define INVALID_THREAD_HWID	0x0fff

#endif /* _ASM_POWERPC_CPUTHREADS_H */