/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

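/*
 * Atomic compare-and-swap using the cas.l instruction (J2 cores).
 * cas.l requires the memory address in r0, hence the "z" constraint.
 * On return, new holds the value previously read from *p; the swap
 * succeeded iff that value equals old.
 */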
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

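/*
 * The lock word is 1 when the lock is free and 0 while it is held,
 * so any non-positive value reads as locked.
 */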
#define arch_spin_is_locked(x) ((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

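/*
 * Returns the previous lock value: nonzero (1) if the lock was free
 * and has just been acquired, 0 if it was already held.
 */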
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

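/*
 * The rwlock word counts remaining reader slots, starting at
 * RW_LOCK_BIAS. Each reader takes one slot by decrementing the
 * count; a writer claims all of them at once by swapping
 * RW_LOCK_BIAS for 0. With RW_LOCK_BIAS of e.g. 0x01000000:
 * unlocked is 0x01000000, one reader is 0x00ffffff, and a writer
 * holding the lock is 0.
 */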
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

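/*
 * A writer can acquire the lock only when no readers hold slots,
 * i.e. the count is still the full RW_LOCK_BIAS. Unlock restores
 * the bias with a single CAS that cannot fail while the writer
 * holds the lock.
 */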
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

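/*
 * The trylock variants report success (nonzero) or failure (zero)
 * rather than spinning: arch_read_trylock retries only on CAS
 * contention and gives up once no reader slots remain, while
 * arch_write_trylock makes a single attempt.
 */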
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#endif /* __ASM_SH_SPINLOCK_CAS_H */