/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}
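
/*
 * Example (sketch): a caller would typically snapshot the yield count, test
 * bit 0, and pass the snapshot back to H_CONFER, so a request that has gone
 * stale (the vCPU was re-dispatched in the meantime) does no harm:
 *
 *	u32 yc = yield_count_of(cpu);
 *
 *	if (yc & 1)			// bit 0 set: cpu is preempted
 *		yield_to_preempted(cpu, yc);
 */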

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields
 * if it cannot acquire the lock, and the unlock slowpath might prod if a
 * waiter has yielded. So this may not be a problem for simple spin locks
 * because the tracing does not technically recurse on the lock, but we avoid
 * it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
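
/*
 * Sketch of how a lock slowpath might use the helpers above (the
 * spin_yield_to_holder() name and holder_cpu argument are illustrative, not
 * defined here): confer the waiter's cycles to the lock holder only if the
 * holder's vCPU is currently preempted, passing the snapshotted yield count
 * so a holder that has since been re-dispatched is left alone. An unlock
 * slowpath would pair this with prod_cpu() on a waiter that has yielded.
 *
 *	static inline void spin_yield_to_holder(int holder_cpu)
 *	{
 *		u32 yield_count = yield_count_of(holder_cpu);
 *
 *		if ((yield_count & 1) == 0)
 *			return;		// holder vCPU is running
 *		yield_to_preempted(holder_cpu, yield_count);
 *	}
 */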

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
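
/*
 * Sketch of where yield_to_any() fits (the should_yield condition is
 * illustrative): a queued waiter that does not know which vCPU to confer to
 * can still give up its cycles to any other vCPU in the shared processor
 * pool instead of burning them spinning.
 *
 *	if (is_shared_processor() && should_yield)
 *		yield_to_any();
 *	else
 *		cpu_relax();
 */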
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
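
/*
 * Sketch of how generic spin-wait code can use vcpu_is_preempted() (the
 * owner_cpu value is illustrative): stop optimistically spinning on a lock
 * owner whose vCPU has been scheduled out, since it cannot release the lock
 * until it runs again.
 *
 *	static inline bool keep_spinning(int owner_cpu)
 *	{
 *		return !need_resched() && !vcpu_is_preempted(owner_cpu);
 *	}
 */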

static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */