/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
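
/*
 * For illustration: the two statements above approximate, in a cheaper way,
 * what the generic fallback is believed to obtain from a single atomic RMW,
 * roughly:
 *
 *	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
 *
 * The "lock btsl" atomically sets only the pending bit and reports its old
 * value through the carry flag; the follow-up atomic_read() then supplies
 * the remaining fields, with _Q_PENDING_MASK cleared so the pending field
 * comes solely from the bit-test result.
 */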

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * An smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
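
/*
 * For illustration: x86 is a TSO machine, so smp_store_release() here needs
 * no fence and boils down to a compiler barrier plus a plain byte store,
 * conceptually:
 *
 *	barrier();
 *	WRITE_ONCE(lock->locked, 0);	// a single movb on x86
 *
 * which keeps the unlock path a single store even for the queued lock.
 */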

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
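
/*
 * For illustration: vcpu_is_preempted() lets spin-wait loops stop burning
 * cycles when the CPU being waited on is a vCPU the hypervisor has scheduled
 * out. A hypothetical caller (names made up for the sketch) might do:
 *
 *	while (!READ_ONCE(done)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			break;		// owner is not running, stop spinning
 *		cpu_relax();
 *	}
 */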
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;
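
/*
 * For illustration: a bare-metal boot is expected to switch the key off once
 * it knows no hypervisor is present. A minimal sketch of such an init
 * routine, assuming the usual X86_FEATURE_HYPERVISOR check, could look like:
 *
 *	void __init native_pv_lock_init(void)
 *	{
 *		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 *			static_branch_disable(&virt_spin_lock_key);
 *	}
 */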

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
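
/*
 * For illustration: the generic slowpath consults this hook early on, so
 * kernel/locking/qspinlock.c is expected to contain something like:
 *
 *	void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *	{
 *		...
 *		if (virt_spin_lock(lock))
 *			return;
 *		...
 *	}
 *
 * With the static key enabled, every contended acquisition therefore falls
 * back to the test-and-set loop above instead of queueing.
 */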
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */