/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted
/*
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. Fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

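/*
 * Lock word encoding: 0 means the lock is free. The fast path takes the
 * lock by compare-and-swapping 0 with the owner's lockval (CPU number + 1,
 * see arch_spin_lockval() below), so a lockval of 0 never denotes an owner.
 * The upper halfword of the lock word carries state for the out-of-line
 * wait code and is therefore left alone by arch_spin_unlock().
 */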
static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

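/*
 * Single compare-and-swap attempt at taking the lock. barrier() is only a
 * compiler barrier; the serialization needed for lock acquisition comes from
 * the compare-and-swap itself.
 */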
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

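/*
 * The _flags variant ignores the saved interrupt state: the flags argument
 * is unused, so waiting for the lock behaves exactly like arch_spin_lock()
 * and interrupts are not re-enabled while spinning.
 */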
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

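/*
 * Unlock by storing 0 into the lower halfword of the lock word: this clears
 * the owner lockval while preserving whatever the out-of-line lock path keeps
 * in the upper halfword. If facility 49 is installed, the ALTERNATIVE patches
 * a NIAI (next-instruction-access-intent) hint in front of the store to
 * reduce cache-line contention on the lock word.
 */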
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm_inline volatile(
		ALTERNATIVE("", ".long 0xb2fa0070", 49)	/* NIAI 7 */
		" sth %1,%0\n"
		: "=Q" (((unsigned short *) &lp->lock)[1])
		: "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

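/*
 * Layout of the ->cnts word: the lower 16 bits count the readers, the upper
 * bits belong to the writers. A reader only has to wait if any of the upper
 * bits are set; the write fast path claims an uncontended lock with a
 * 0 -> 0x30000 compare-and-swap and write unlock subtracts the same value.
 * The finer-grained meaning of the writer bits is private to the out-of-line
 * wait functions declared above.
 */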
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}

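/*
 * The trylock variants make a single attempt based on a snapshot of ->cnts;
 * they may fail even when the lock could have been taken if the count changes
 * between the read and the compare-and-swap, which is acceptable for trylock.
 */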
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) &&
		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */