/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

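/*
 * Layout of lock->cnts implied by the definitions above:
 *
 *  bits  0-7: writer-locked byte (lock->wlocked)
 *  bit     8: a writer is queued and waiting (_QW_WAITING)
 *  bits 9-31: reader count, in increments of _QR_BIAS
 */
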
/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
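		/*
		 * No writer was seen: speculatively add a reader bias and
		 * recheck, since a writer may have raced in meanwhile.
		 */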
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
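		/* A writer did race in: back the reader bias out and fail. */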
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

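	/*
	 * The lock word was completely free (no readers, no writer);
	 * try to claim the writer byte in a single atomic step.
	 */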
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

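	/*
	 * Fast path: add a reader bias. We are done unless a writer is
	 * active or waiting.
	 */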
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the uncontended case: no readers and no writer. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

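	/* Readers or another writer are present: queue up in the slowpath. */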
	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
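	/*
	 * The writer owns only the low "wlocked" byte of lock->cnts;
	 * clearing it with release semantics leaves the waiting-writer
	 * bit and the reader count untouched.
	 */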
	smp_store_release(&lock->wlocked, 0);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)
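
/*
 * Illustrative usage sketch (an assumption for documentation purposes,
 * not part of the API defined here): kernel code normally goes through
 * the generic rwlock wrappers, which expand to the queued variants via
 * the arch_* defines above, e.g.:
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	read_lock(&example_lock);
 *	... several readers may hold this section concurrently ...
 *	read_unlock(&example_lock);
 *
 *	write_lock(&example_lock);
 *	... exclusive writer section ...
 *	write_unlock(&example_lock);
 */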

#endif /* __ASM_GENERIC_QRWLOCK_H */