/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <asm/barrier.h>

#ifdef CONFIG_QUEUED_RWLOCKS

/*
 * Ticket-based spin-locking.
 */
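/*
 * Lock word layout, assuming the definitions in asm/spinlock_types.h:
 * the low 16 bits hold tickets.owner and the high 16 bits hold
 * tickets.next, with TICKET_NEXT naming the shift (16) of the next
 * field.  arch_spin_lock() draws a ticket by atomically adding
 * 1 << TICKET_NEXT, then spins until the owner field catches up with
 * the ticket it drew.
 */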
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%2) \n"
		"	mov		%1, %0	 \n"
		"	add		%0, %3	 \n"
		"	stex.w		%0, (%2) \n"
		"	bez		%0, 1b	 \n"
		: "=&r" (tmp), "=&r" (lockval)
		: "r"(p), "r"(ticket_next)
		: "cc");

	while (lockval.tickets.next != lockval.tickets.owner)
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

	smp_mb();
}
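
/*
 * The fast path below tests "owner == next" without a second load:
 * rotli swaps the two 16-bit halves of the lock word and cmpne
 * compares the swapped copy against the original; they are equal iff
 * owner == next, i.e. the lock is free.  For example, 0x00030003
 * (next == owner == 3) rotates to itself and the lock is taken, while
 * 0x00040003 rotates to 0x00030004 != 0x00040003 and the attempt
 * fails with "contended" set.
 */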
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 tmp, contended, res;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;

	do {
		asm volatile (
		"	ldex.w		%0, (%3)   \n"
		"	movi		%2, 1	   \n"
		"	rotli		%1, %0, 16 \n"
		"	cmpne		%1, %0	   \n"
		"	bt		1f	   \n"
		"	movi		%2, 0	   \n"
		"	add		%0, %0, %4 \n"
		"	stex.w		%0, (%3)   \n"
		"1:				   \n"
		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
		: "r"(p), "r"(ticket_next)
		: "cc");
	} while (!res);

	if (!contended)
		smp_mb();

	return !contended;
}
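
/*
 * Unlock just publishes the next owner ticket.  The full barrier ahead
 * of the plain store gives release ordering, and no atomic sequence is
 * needed because only the current lock holder ever writes the owner
 * field.
 */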
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);

	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#else /* CONFIG_QUEUED_RWLOCKS */

/*
 * Test-and-set spin-locking.
 */
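/*
 * All of the ldex.w/stex.w sequences below rely on stex.w writing a
 * success flag back into its source register: 1 if the conditional
 * store went through, 0 if the exclusive reservation was lost, which
 * is why each sequence retries with "bez %0, 1b" after the store.
 */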
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

/*
 * read lock/unlock/trylock
 */
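/*
 * The rwlock word is a signed counter: 0 means unlocked, a positive
 * value counts the active readers, and a writer drives it negative,
 * so prospective readers spin while the value is below zero.
 */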
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 1b   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	smp_mb();
	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 2f   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

/*
 * write lock/unlock/trylock
 */
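/*
 * A writer takes the lock by moving the counter from 0 to -1 (subi on
 * the freshly loaded 0), which excludes readers and other writers at
 * once.
 */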
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_H */