^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Queued spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * (C) Copyright 2013-2014,2018 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * (C) Copyright 2015 Intel Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Authors: Waiman Long <longman@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Peter Zijlstra <peterz@infradead.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #ifndef _GEN_PV_LOCK_SLOWPATH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/hardirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/prefetch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/qspinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * Include queued spinlock statistics code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "qspinlock_stat.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * The basic principle of a queue-based spinlock can best be understood
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * by studying a classic queue-based spinlock implementation called the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * Synchronization on Shared-Memory Multiprocessors by Mellor-Crummey and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * Scott") is available at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * https://bugzilla.kernel.org/show_bug.cgi?id=206115
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * This queued spinlock implementation is based on the MCS lock, however to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * existing API, we must modify it somehow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * In particular; where the traditional MCS lock consists of a tail pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * unlock the next pending (next->locked), we compress both these: {tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * next->locked} into a single u32 value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * Since a spinlock disables recursion of its own context and there is a limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * to the contexts that can nest; namely: task, softirq, hardirq, nmi. As there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * are at most 4 nesting levels, it can be encoded by a 2-bit number. Now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * we can encode the tail by combining the 2-bit nesting level with the cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * number. With one byte for the lock value and 3 bytes for the tail, only a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * 32-bit word is now needed. Even though we only need 1 bit for the lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * we extend it to a full byte to achieve better performance for architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * that support atomic byte write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * We also change the first spinner to spin on the lock bit instead of its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * node; whereby avoiding the need to carry a node from lock to unlock, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * preserving existing lock API. This also makes the unlock code simpler and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * faster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * N.B. The current implementation only supports architectures that allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * atomic operations on smaller 8-bit and 16-bit data types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
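/*
 * For illustration (assuming the NR_CPUS < 16K layout from
 * include/asm-generic/qspinlock_types.h), the 32-bit lock word breaks
 * down as:
 *
 *	 0- 7: locked byte
 *	    8: pending
 *	 9-15: unused
 *	16-17: tail index (the 2-bit nesting level)
 *	18-31: tail cpu (cpu number + 1)
 *
 * so a lock held by cpu 5 at nesting level 2, with no pending waiter,
 * would read ((5 + 1) << 18) | (2 << 16) | 1 == 0x001a0001.
 */
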
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include "mcs_spinlock.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define MAX_NODES 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * size and four of them will fit nicely in one 64-byte cacheline. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * pvqspinlock, however, we need more space for extra data. To accommodate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * that, we insert two more long words to pad it up to 32 bytes. IOW, only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * two of them can fit in a cacheline in this case. That is OK as it is rare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * to have more than 2 levels of slowpath nesting in actual use. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * want to penalize pvqspinlocks to optimize for a rare case in native
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * qspinlocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct qnode {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct mcs_spinlock mcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #ifdef CONFIG_PARAVIRT_SPINLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) long reserved[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * The pending bit spinning loop count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * This heuristic is used to limit the number of lockword accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * made by atomic_cond_read_relaxed when waiting for the lock to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * transition out of the "== _Q_PENDING_VAL" state. We don't spin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * indefinitely because there's no guarantee that we'll make forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #ifndef _Q_PENDING_LOOPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define _Q_PENDING_LOOPS 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
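/*
 * Architectures where the pending->locked hand-over is known to be quick
 * can raise this bound; x86, for instance, is understood to define
 * _Q_PENDING_LOOPS as (1 << 9) in its asm/qspinlock.h.
 */
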
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * Per-CPU queue node structures; we can never have more than 4 nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * contexts: task, softirq, hardirq, nmi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * Exactly fits one 64-byte cacheline on a 64-bit architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * PV doubles the storage and uses the second cacheline for PV state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * We must be able to distinguish between no-tail and the tail at 0:0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * therefore increment the cpu number by one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static inline __pure u32 encode_tail(int cpu, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) u32 tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
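/*
 * Worked example, assuming _Q_TAIL_IDX_OFFSET == 16 and
 * _Q_TAIL_CPU_OFFSET == 18 (the NR_CPUS < 16K layout):
 * encode_tail(5, 2) == ((5 + 1) << 18) | (2 << 16) == 0x001a0000.
 * Because of the +1, cpu 0 encodes as 1 and a tail of 0 always means
 * "no queue".
 */
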
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return per_cpu_ptr(&qnodes[idx].mcs, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) static inline __pure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return &((struct qnode *)base + idx)->mcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
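/*
 * The two helpers above invert encode_tail(). Continuing the example
 * (same assumed offsets), decode_tail(0x001a0000) yields
 * cpu = (0x001a0000 >> 18) - 1 == 5 and idx = 2, i.e.
 * per_cpu_ptr(&qnodes[2].mcs, 5), while grab_mcs_node(base, idx) takes
 * the same index step via pointer arithmetic on this cpu's qnode array.
 */
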
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #if _Q_PENDING_BITS == 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * clear_pending - clear the pending bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * *,1,* -> *,0,*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static __always_inline void clear_pending(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) WRITE_ONCE(lock->pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * clear_pending_set_locked - take ownership and clear the pending bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * *,1,0 -> *,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * Lock stealing is not allowed if this function is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
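/*
 * The byte and halfword stores above work because, for
 * _Q_PENDING_BITS == 8, struct qspinlock overlays the atomic word with
 * sub-word fields; on little-endian this looks roughly like (sketch
 * after include/asm-generic/qspinlock_types.h):
 *
 *	union {
 *		atomic_t val;
 *		struct {
 *			u8  locked;
 *			u8  pending;
 *		};
 *		struct {
 *			u16 locked_pending;
 *			u16 tail;
 *		};
 *	};
 *
 * so WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL) rewrites the
 * {locked, pending} pair in one store without touching the tail.
 */
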
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * xchg_tail - Put in the new queue tail code word & retrieve previous one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * @lock : Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * @tail : The new queue tail code word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * Return: The previous queue tail code word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * xchg(lock, tail), which heads an address dependency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * p,*,* -> n,*,* ; prev = xchg(lock, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * We can use relaxed semantics since the caller ensures that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * MCS node is properly initialized before updating the tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return (u32)xchg_relaxed(&lock->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
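/*
 * Typical use, as in the slowpath below: the returned tail is decoded
 * into the @prev pointer which is then dereferenced, so the xchg heads
 * an address dependency ordering the subsequent accesses to @prev:
 *
 *	old  = xchg_tail(lock, tail);
 *	prev = decode_tail(old);
 *	WRITE_ONCE(prev->next, node);
 */
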
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #else /* _Q_PENDING_BITS == 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * clear_pending - clear the pending bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) * *,1,* -> *,0,*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static __always_inline void clear_pending(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) atomic_andnot(_Q_PENDING_VAL, &lock->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * clear_pending_set_locked - take ownership and clear the pending bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * *,1,0 -> *,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
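/*
 * Worked out: with _Q_LOCKED_VAL == 1 and _Q_PENDING_VAL == (1 << 8)
 * (per qspinlock_types.h), the single atomic_add() of -0x100 + 0x01
 * clears the pending byte and sets the locked byte at once; this is
 * only correct because the caller guarantees pending == 1, locked == 0,
 * so neither the add nor the subtract can carry into neighbouring bits.
 */
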
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * xchg_tail - Put in the new queue tail code word & retrieve previous one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * @lock : Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * @tail : The new queue tail code word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * Return: The previous queue tail code word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) * xchg(lock, tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * p,*,* -> n,*,* ; prev = xchg(lock, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) u32 old, new, val = atomic_read(&lock->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) new = (val & _Q_LOCKED_PENDING_MASK) | tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * We can use relaxed semantics since the caller ensures that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * the MCS node is properly initialized before updating the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) old = atomic_cmpxchg_relaxed(&lock->val, val, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (old == val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) val = old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) return old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) #endif /* _Q_PENDING_BITS == 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * @lock : Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * Return: The previous lock value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * *,*,* -> *,1,*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) #ifndef queued_fetch_set_pending_acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * set_locked - Set the lock bit and own the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * *,*,0 -> *,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static __always_inline void set_locked(struct qspinlock *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * all the PV callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) struct mcs_spinlock *prev) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) static __always_inline void __pv_kick_node(struct qspinlock *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) struct mcs_spinlock *node) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct mcs_spinlock *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) #define pv_enabled() false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) #define pv_init_node __pv_init_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) #define pv_wait_node __pv_wait_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) #define pv_kick_node __pv_kick_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) #define pv_wait_head_or_lock __pv_wait_head_or_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) #ifdef CONFIG_PARAVIRT_SPINLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) #endif /* _GEN_PV_LOCK_SLOWPATH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * queued_spin_lock_slowpath - acquire the queued spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * @lock: Pointer to queued spinlock structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * @val: Current value of the queued spinlock 32-bit word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * (queue tail, pending bit, lock value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * fast : slow : unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * : :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * uncontended (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * : | ^--------.------. / :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * : v \ \ | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * pending : (0,1,1) +--> (0,1,0) \ | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * : | ^--' | | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * : v | | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * uncontended : (n,x,y) +--> (n,0,0) --' | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * queue : | ^--' | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * : v | :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * queue : ^--' :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) struct mcs_spinlock *prev, *next, *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) u32 old, tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (pv_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) goto pv_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (virt_spin_lock(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * Wait for in-progress pending->locked hand-overs with a bounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * number of spins so that we guarantee forward progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * 0,1,0 -> 0,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) if (val == _Q_PENDING_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) int cnt = _Q_PENDING_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) val = atomic_cond_read_relaxed(&lock->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) (VAL != _Q_PENDING_VAL) || !cnt--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
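	/*
	 * Note that atomic_cond_read_relaxed() re-evaluates its condition
	 * on every iteration, so the "|| !cnt--" above bounds the spin:
	 * with the default _Q_PENDING_LOOPS of 1 we re-read the lock word
	 * once and then stop waiting for the hand-over; if it still hasn't
	 * completed, the contention check below sends us to the queue.
	 */
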
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * If we observe any contention; queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (val & ~_Q_LOCKED_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * trylock || pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) val = queued_fetch_set_pending_acquire(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * If we observe contention, there is a concurrent locker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * Undo and queue; our setting of PENDING might have made the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) * n,0,0 -> 0,0,0 transition fail and it will now be waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * on @next to become !NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (unlikely(val & ~_Q_LOCKED_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) /* Undo PENDING if we set it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (!(val & _Q_PENDING_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) clear_pending(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * We're pending, wait for the owner to go away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * 0,1,1 -> 0,1,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * this wait loop must be a load-acquire such that we match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * store-release that clears the locked bit and create lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * sequentiality; this is because not all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * clear_pending_set_locked() implementations imply full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * barriers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (val & _Q_LOCKED_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * take ownership and clear the pending bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * 0,1,0 -> 0,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) clear_pending_set_locked(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) lockevent_inc(lock_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * End of pending bit optimistic spinning and beginning of MCS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * queuing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) lockevent_inc(lock_slowpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) pv_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) node = this_cpu_ptr(&qnodes[0].mcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) idx = node->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) tail = encode_tail(smp_processor_id(), idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * 4 nodes are allocated based on the assumption that there will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * not be nested NMIs taking spinlocks. That may not be true in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * some architectures even though the chance of needing more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * 4 nodes will still be extremely unlikely. When that happens,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * we fall back to spinning on the lock directly without using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * any MCS node. This is not the most elegant solution, but is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * simple enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) if (unlikely(idx >= MAX_NODES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) lockevent_inc(lock_no_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) while (!queued_spin_trylock(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) node = grab_mcs_node(node, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * Keep counts of non-zero index values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) lockevent_cond_inc(lock_use_node2 + idx - 1, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * Ensure that we increment the head node->count before initialising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * the actual node. If the compiler is kind enough to reorder these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * stores, then an IRQ could overwrite our assignments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) node->locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) node->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) pv_init_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * We touched a (possibly) cold cacheline in the per-cpu queue node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * attempt the trylock once more in the hope someone let go while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * weren't watching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (queued_spin_trylock(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) * Ensure that the initialisation of @node is complete before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) * publish the updated tail via xchg_tail() and potentially link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * @node into the waitqueue via WRITE_ONCE(prev->next, node) below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * Publish the updated tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * We have already touched the queueing cacheline; don't bother with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * pending stuff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * p,*,* -> n,*,*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) old = xchg_tail(lock, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * if there was a previous node; link it and wait until reaching the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * head of the waitqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (old & _Q_TAIL_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) prev = decode_tail(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) /* Link @node into the waitqueue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) WRITE_ONCE(prev->next, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) pv_wait_node(node, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) arch_mcs_spin_lock_contended(&node->locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * While waiting for the MCS lock, the next pointer may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * been set by another lock waiter. We optimistically load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * the next pointer & prefetch the cacheline for writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * to reduce latency in the upcoming MCS unlock operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) next = READ_ONCE(node->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) if (next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) prefetchw(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * we're at the head of the waitqueue, wait for the owner & pending to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * go away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * *,x,y -> *,0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * this wait loop must use a load-acquire such that we match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * store-release that clears the locked bit and create lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * sequentiality; this is because the set_locked() function below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * does not imply a full barrier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * The PV pv_wait_head_or_lock function, if active, will acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * the lock and return a non-zero value. So we have to skip the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) * atomic_cond_read_acquire() call. As the next PV queue head hasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * been designated yet, there is no way for the locked value to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * _Q_SLOW_VAL. So both the set_locked() and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * atomic_cmpxchg_relaxed() calls will be safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * If PV isn't active, 0 will be returned instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if ((val = pv_wait_head_or_lock(lock, node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) goto locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) locked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * claim the lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * n,0,0 -> 0,0,1 : lock, uncontended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * *,*,0 -> *,*,1 : lock, contended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * If the queue head is the only one in the queue (lock value == tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * and nobody is pending, clear the tail code and grab the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * Otherwise, we only need to grab the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * In the PV case we might already have _Q_LOCKED_VAL set, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * of lock stealing; therefore we must also allow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * n,0,1 -> 0,0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * above wait condition, therefore any concurrent setting of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * PENDING will make the uncontended transition fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if ((val & _Q_TAIL_MASK) == tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) goto release; /* No contention */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
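	/*
	 * Worked example of the uncontended transition above: if we are
	 * the only queued CPU and nothing is pending, val == tail (say
	 * 0x001a0000), and the try_cmpxchg swings the whole word to
	 * _Q_LOCKED_VAL, clearing our tail and taking the lock in one go
	 * (in the PV case the locked byte may additionally be set, as
	 * noted above). On failure, val is reloaded and we fall through.
	 */
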
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * Either somebody is queued behind us or _Q_PENDING_VAL got set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * which will then detect the remaining tail and queue behind us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) * ensuring we'll see a @next.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) set_locked(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * contended path; wait for next if not observed yet, release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (!next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) next = smp_cond_load_relaxed(&node->next, (VAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) arch_mcs_spin_unlock_contended(&next->locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) pv_kick_node(lock, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) * release the node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) __this_cpu_dec(qnodes[0].mcs.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) EXPORT_SYMBOL(queued_spin_lock_slowpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * Generate the paravirt code for queued_spin_unlock_slowpath().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) #if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) #define _GEN_PV_LOCK_SLOWPATH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) #undef pv_enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) #define pv_enabled() true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #undef pv_init_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) #undef pv_wait_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) #undef pv_kick_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #undef pv_wait_head_or_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) #undef queued_spin_lock_slowpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) #include "qspinlock_paravirt.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) #include "qspinlock.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) bool nopvspin __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) static __init int parse_nopvspin(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) nopvspin = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) early_param("nopvspin", parse_nopvspin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #endif