/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

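/*
 * _Q_PENDING_LOOPS bounds how long the generic slowpath spins waiting
 * for an in-flight pending->locked hand-over before it falls back to
 * queueing; the "not tuned" note means the value has not been
 * benchmarked on powerpc.
 */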
#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

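/*
 * On a shared-processor LPAR the lock holder's vCPU can be preempted
 * by the hypervisor, so spinning is wasteful: take the paravirt
 * slowpath, which can yield the waiting vCPU instead. Bare metal and
 * dedicated-processor LPARs use the native slowpath.
 */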
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

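/*
 * The native unlock is a simple store-release of the locked byte. The
 * paravirt variant additionally has to find and kick (wake) any vCPU
 * that went to sleep in pv_wait() on this lock.
 */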
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif

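/*
 * Fast path: when the whole lock word is 0 (unlocked, no pending bit,
 * empty queue) a single cmpxchg acquires the lock. On failure,
 * atomic_try_cmpxchg_lock() updates 'val' with the value it observed,
 * which the slowpath takes as its starting point.
 */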
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

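/*
 * Lock acquisition only has acquire semantics, which is not a full
 * barrier; callers that rely on full ordering after taking a lock
 * (e.g. the scheduler) get it from this explicit sync.
 */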
#define smp_mb__after_spinlock()	smp_mb()

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
	 * but it should now be possible to remove it, as arm64 has done with
	 * commit c6f5d02b6a0f.
	 */
	smp_mb();
	return atomic_read(&lock->val);
}
#define queued_spin_is_locked queued_spin_is_locked

#ifdef CONFIG_PARAVIRT_SPINLOCKS
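/*
 * SPIN_THRESHOLD is how many times a waiter re-reads the lock in the
 * paravirt slowpath before giving up and sleeping in pv_wait(); as
 * with _Q_PENDING_LOOPS, the value has not been tuned for powerpc.
 */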
#define SPIN_THRESHOLD (1 << 15) /* not tuned */

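/*
 * Called by the generic paravirt slowpath once SPIN_THRESHOLD is
 * exceeded. Re-check the wait condition first: if *ptr no longer holds
 * 'val', the kick may already have happened and sleeping now could
 * miss it.
 */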
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * If we were waiting in the queue we could pass in the CPU of the
	 * previous waiter here and yield to it directly.
	 */
}

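/*
 * Wake a vCPU that is sleeping in pv_wait(). prod_cpu() asks the
 * hypervisor to make the target CPU runnable (H_PROD on pseries).
 */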
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}

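/*
 * The generic paravirt code hashes sleeping waiters by lock address so
 * that __pv_queued_spin_unlock() can find which vCPU to kick; the hash
 * table must be allocated once at boot via this hook.
 */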
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */