/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

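/*
 * The lock word holds 1 while the lock is free and 0 while it is held,
 * so any non-positive value reads as locked.
 */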
#define arch_spin_is_locked(x)		((x)->lock <= 0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */
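/*
 * movli.l/movco.l are SH-4A's LL/SC pair: movli.l loads the lock word
 * and opens a reservation, and movco.l stores only if the reservation
 * is still intact, setting T on success.  The first "bf 1b" retries a
 * failed store; cmp/pl plus the second "bf 1b" keeps spinning until
 * the value we atomically replaced with 0 was positive, i.e. unlocked.
 */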
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

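/*
 * Unlock is a plain store of 1; the "memory" clobber stops the
 * compiler from sinking critical-section accesses below the store.
 */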
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	__asm__ __volatile__ (
		"mov		#1, %0	! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

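/*
 * Unconditionally swaps in 0 and returns the previous lock word, so a
 * non-zero return means the lock was free and is now ours.  The synco
 * barrier orders the critical section after a successful acquire.
 */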
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

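/*
 * Usage sketch (illustrative only; "my_lock" is a made-up name): these
 * helpers sit underneath the generic spinlock API and are never called
 * directly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */
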
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 */

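/*
 * rw->lock starts at RW_LOCK_BIAS.  Each reader takes one count and a
 * writer takes the whole bias, so a positive count means a read lock
 * may be taken, and a full bias means the lock is idle.
 */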
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

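/* Dropping a read lock is an atomic increment via the same LL/SC loop. */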
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

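/*
 * cmp/hs is an unsigned >= compare: spin until the count climbs back
 * to RW_LOCK_BIAS (no readers, no writer), then subtract the entire
 * bias to claim every slot at once.
 */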
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

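/*
 * The writer owns all RW_LOCK_BIAS slots, so restoring the full bias
 * with a plain store is enough; no LL/SC retry loop is needed.
 */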
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0	! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

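/*
 * Fails fast: a non-positive count branches straight to 2: without
 * storing, and the caller sees a zero return.  Otherwise decrement
 * with the usual movco.l retry.
 */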
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

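/*
 * movco.l always runs: on failure it writes the unchanged old value
 * back, on success the value minus RW_LOCK_BIAS; the unsigned compare
 * against the bias decides which.  The return is true only if the
 * full bias was observed, i.e. the write lock was actually taken.
 */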
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */