// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

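/*
 * Lock word layout (see include/asm-generic/qrwlock.h): the low byte
 * holds the writer-locked state (_QW_LOCKED), bit 8 flags a waiting
 * writer (_QW_WAITING), and the remaining bits count readers in units
 * of _QR_BIAS.  Waiters of either kind are serialized on wait_lock.
 */
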
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
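	/*
	 * Undo the _QR_BIAS speculatively added by the fast path before
	 * joining the wait queue; it is re-added once this reader reaches
	 * the head of the queue.
	 */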
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
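
	/*
	 * Holding wait_lock means this reader is now at the head of the
	 * queue, so announce it again in the reader count before spinning
	 * for any writer to release the lock.
	 */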
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);
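
	/*
	 * With _QW_WAITING set, the reader fast path fails and new readers
	 * fall into the slowpath, where they queue behind this writer on
	 * wait_lock (interrupt-context readers excepted).
	 */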

	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
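
/*
 * For reference, a rough sketch of the fast paths that funnel into the
 * slowpaths above.  The real definitions live in
 * include/asm-generic/qrwlock.h; this is illustrative only and may differ
 * in detail from the header actually in use:
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *		queued_read_lock_slowpath(lock);
 *	}
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *		queued_write_lock_slowpath(lock);
 *	}
 */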