Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
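/*
 * Illustrative sketch (not part of this file): a minimal user-space model of
 * the pv_wait()/pv_kick() contract described above, using one condition
 * variable per "vcpu". All names here (model_vcpu, model_pv_wait,
 * model_pv_kick) are hypothetical and exist only for this example; the real
 * hypercalls are supplied by the architecture.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>

struct model_vcpu {
	pthread_mutex_t	lock;
	pthread_cond_t	wake;
	int		kicked;	/* latches a kick so wakeups aren't lost */
};

/* Suspend the calling "vcpu" while *ptr == val, like pv_wait(). */
static void model_pv_wait(struct model_vcpu *vcpu, _Atomic unsigned char *ptr,
			  unsigned char val)
{
	pthread_mutex_lock(&vcpu->lock);
	/* Re-check under the mutex: a kick may have arrived already. */
	while (!vcpu->kicked && atomic_load(ptr) == val)
		pthread_cond_wait(&vcpu->wake, &vcpu->lock);
	vcpu->kicked = 0;
	pthread_mutex_unlock(&vcpu->lock);
}

/* Wake a suspended "vcpu", like pv_kick(); kicking a running one is benign. */
static void model_pv_kick(struct model_vcpu *vcpu)
{
	pthread_mutex_lock(&vcpu->lock);
	vcpu->kicked = 1;
	pthread_cond_signal(&vcpu->wake);
	pthread_mutex_unlock(&vcpu->lock);
}
#endif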

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
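/*
 * Worked value (assuming the standard qspinlock layout where
 * _Q_LOCKED_OFFSET == 0 and _Q_LOCKED_VAL == 1): _Q_SLOW_VAL evaluates to 3,
 * which keeps the locked byte non-zero (the lock still reads as held) while
 * remaining distinguishable from a plain _Q_LOCKED_VAL at unlock time.
 */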

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff
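/*
 * Worked example: with PV_PREV_CHECK_MASK == 0xff, pv_wait_early() below
 * only samples prev->state when the low 8 bits of the loop counter are
 * zero, i.e. once every 256 spin iterations.
 */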

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};
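/*
 * Summary of the state transitions implemented below:
 *
 *   vcpu_running -> vcpu_halted	pv_wait_node() before calling pv_wait()
 *   vcpu_halted  -> vcpu_running	pv_wait_node() after waking up
 *   vcpu_halted  -> vcpu_hashed	pv_kick_node() by the lock holder
 *   vcpu_running -> vcpu_hashed	pv_wait_head_or_lock() before pv_wait()
 */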

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disabling lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		   (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
			lockevent_inc(pv_lock_stealing);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}
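/*
 * Outcomes of the trylock above, keyed on the lock value it observes:
 *
 *   locked and pending both clear	-> attempt to steal; true on success
 *   pending set			-> return false; stealing is disabled,
 *					   join the MCS wait queue
 *   tail clear (queue empty)		-> return false; enter the queued
 *					   slowpath to become the queue head
 *   otherwise (queued, not pending)	-> keep spinning in unfair mode
 */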

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock to make sure that it will get it.
 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	return !READ_ONCE(lock->locked) &&
	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
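/*
 * Worked example (assuming the usual little-endian layout with 8 locked
 * bits at offset 0 and the pending byte at offset 8): locked_pending is
 * the 16-bit word covering both bytes, so the cmpxchg_acquire() above
 * atomically swaps {pending = 1, locked = 0} (0x0100) for
 * {pending = 0, locked = 1} (0x0001) in one step.
 */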
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;

		/*
		 * Try to clear pending bit & set locked bit
		 */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
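/*
 * Worked sizing (assuming 64-bit pointers, 64-byte cachelines and 4K
 * pages): sizeof(struct pv_hash_entry) == 16, so PV_HE_PER_LINE == 4
 * entries per cacheline and PV_HE_MIN == 256 buckets, matching the
 * "256 (64-bit)" figure quoted above.
 */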

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}
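/*
 * Sizing example: with 64 possible CPUs, 4 * 64 = 256 entries are requested
 * (already a multiple of PV_HE_PER_LINE). That also satisfies PV_HE_MIN, so
 * under the 64-bit layout above the table fills exactly one 4K page and
 * pv_lock_hash_bits ends up as 8.
 */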

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
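/*
 * Probing note: the macro above first rounds the hash down to a cacheline
 * boundary (a multiple of PV_HE_PER_LINE), then walks the table linearly,
 * wrapping modulo the table size, so lookups scan whole cachelines of
 * entries at a time.
 */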

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		hopcnt++;
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			lockevent_pv_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node, which is not
 * in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;
	bool wait_early;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked)) {
			lockevent_inc(pv_wait_node);
			lockevent_cond_inc(pv_wait_early, wait_early);
			pv_wait(&pn->state, vcpu_halted);
		}

		/*
		 * If pv_kick_node() changed us to vcpu_hashed, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		lockevent_cond_inc(pv_spurious_wakeup,
				  !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with the smp_store_mb() and cmpxchg() in pv_wait_node().
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
	    != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	/*
	 * Tracking # of slowpath locking operations
	 */
	lockevent_inc(lock_slowpath);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and unhash the table.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		lockevent_inc(pv_wait_head);
		lockevent_cond_inc(pv_wait_again, waitcnt);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	lockevent_inc(pv_kick_unlock);
	pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other, sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */