Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

include/asm-generic/qspinlock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

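/*
 * For reference (see asm-generic/qspinlock_types.h): the 32-bit lock
 * word packs a locked byte in its low 8 bits, a pending bit above it,
 * and the MCS wait-queue tail in the upper bits. That layout is why a
 * plain byte store can release the lock while waiter state survives in
 * the rest of the word.
 */
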
#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

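/*
 * Illustrative sketch (not part of the original header): a common use of
 * queued_spin_is_locked() is asserting that a lock is held before touching
 * the data it protects. Assumes <linux/bug.h> for WARN_ON_ONCE(); the
 * helper name is hypothetical.
 */
static __always_inline void example_assert_held(struct qspinlock *lock)
{
	WARN_ON_ONCE(!queued_spin_is_locked(lock));	/* caller must hold @lock */
}
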
/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, to keep the lockref code
 *      from stealing the lock and changing things underneath it. This
 *      also allows some optimizations to be applied without conflicting
 *      with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

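/*
 * Illustrative sketch: because queued_spin_value_unlocked() takes the lock
 * by value, callers such as lockref can test a local snapshot without
 * touching the live lock word. The helper name is hypothetical.
 */
static __always_inline bool example_snapshot_unlocked(struct qspinlock *lock)
{
	struct qspinlock snapshot = READ_ONCE(*lock);	/* one-shot copy */

	return queued_spin_value_unlocked(snapshot);
}
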
/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
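
/*
 * Illustrative sketch: any bits outside _Q_LOCKED_MASK mean someone is
 * waiting (pending or queued), so a lock holder can use this test to yield
 * early during batched work. example_more_work()/example_do_item() are
 * hypothetical.
 */
extern bool example_more_work(void);
extern void example_do_item(void);

static __always_inline void example_batch_under_lock(struct qspinlock *lock)
{
	/* Caller holds @lock. */
	while (example_more_work()) {
		example_do_item();
		if (queued_spin_is_contended(lock))
			break;	/* give queued waiters a turn */
	}
}
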
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

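/*
 * Illustrative sketch: the read before the cmpxchg means a busy lock fails
 * fast without dirtying the cache line. A typical caller treats failure as
 * "defer and retry"; the helper name is hypothetical.
 */
static __always_inline bool example_try_enter(struct qspinlock *lock)
{
	if (!queued_spin_trylock(lock))
		return false;	/* contended: caller defers or retries */

	/* ... critical section; release later with queued_spin_unlock() */
	return true;
}
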
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif

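/*
 * Note on the fast path above: on failure, atomic_try_cmpxchg_acquire()
 * writes the lock value it observed back into @val, so the slow path is
 * entered with that value and does not have to re-read the lock word.
 */
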
#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics: the store that clears the
	 * locked byte must not be reordered before any access inside
	 * the critical section.
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

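/*
 * Illustrative sketch: a plain critical section pairing the acquire in
 * queued_spin_lock() with the release in queued_spin_unlock(); accesses
 * inside the section cannot be reordered outside the pair. The helper
 * name is hypothetical.
 */
static __always_inline void example_critical_section(struct qspinlock *lock)
{
	queued_spin_lock(lock);
	/* ... data protected by @lock ... */
	queued_spin_unlock(lock);
}
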
#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

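/*
 * Note: this stub keeps the native queued path. Architectures such as x86
 * override virt_spin_lock() to fall back to a simple test-and-set lock
 * when running under a hypervisor, where strict FIFO queueing performs
 * poorly if a queued vCPU gets preempted.
 */
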
/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */