/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code to avoid lock stealing by the lockref
 *      code and change things underneath the lock. This also allows some
 *      optimizations to be applied without conflict with lockref code.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
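
/*
 * For reference only (a condensed summary; qspinlock_types.h remains the
 * authoritative definition): with NR_CPUS < 16K the lock word is laid out as
 *
 *	 0- 7: locked byte
 *	    8: pending
 *	 9-15: not used
 *	16-17: tail index
 *	18-31: tail cpu (+1)
 *
 * so masking off _Q_LOCKED_MASK above leaves exactly the pending and tail
 * bits, i.e. the lock reads as contended whenever any waiter state is set.
 */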

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
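
/*
 * Note on the trylock fast path: atomic_try_cmpxchg_acquire() succeeds,
 * with acquire ordering, only if lock->val still equals val; on failure
 * the operation is relaxed and writes the value actually observed back
 * into val. The unlikely(val) test above skips the more expensive cmpxchg
 * whenever the lock is already visibly held or contended.
 */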

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif
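
/*
 * Sketch of the contended flow (a summary, not authoritative): when the
 * cmpxchg in queued_spin_lock() fails, val has been updated with the lock
 * word actually observed, and that snapshot is handed to
 * queued_spin_lock_slowpath(), which uses it to decide whether to take the
 * pending bit or queue on the MCS waiter list without an extra re-read.
 */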

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
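
/*
 * Only the locked byte is written on unlock: the pending bit and the
 * waiter tail live in the upper bits of the lock word and belong to the
 * contenders, so a full-word store would clobber them. Releasing just
 * lock->locked lets the next-in-line waiter claim the lock.
 */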

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
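
/*
 * A minimal sketch, assuming a hypervisor guest wants a test-and-set
 * fallback (the static key name below is illustrative; see the x86 port
 * for a real example). An architecture can override the hook before
 * including this header:
 *
 *	#define virt_spin_lock virt_spin_lock
 *	static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!static_branch_likely(&virt_spin_lock_key))
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val))
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */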

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */