/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
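
/*
 * Summary of the three families defined above:
 *  - atomic_##op()           updates the counter; no return value and no
 *                            ordering guarantee (no smp_mb()).
 *  - atomic_##op##_return()  returns the new value; fully ordered by the
 *                            smp_mb() pair around the LLSC retry loop.
 *  - atomic_fetch_##op()     returns the old value; likewise fully ordered.
 */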

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating the atomic_xxx API locking protocol on UP, for optimization's sake */
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
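
/*
 * Illustration of the hazard being avoided: a concurrent locked RMW on
 * another CPU does
 *	lock; temp = v->counter; temp += i; v->counter = temp; unlock;
 * An unlocked plain store from atomic_set() slipping in between that load
 * and store would simply be overwritten and lost.
 */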

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non-hardware-assisted atomic R-M-W: locking is via irq-disabling (UP)
 * or a spinlock (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
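
/*
 * For instance, ATOMIC_OPS(add, +=, add) expands to atomic_add(),
 * atomic_add_return() and atomic_fetch_add(), built either on the
 * LLOCK/SCOND loops or on the lock-based C versions above, depending on
 * CONFIG_ARC_HAS_LLSC.
 */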

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
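
/*
 * The bitwise ops only get the void and fetch variants: the kernel's atomic
 * API defines no atomic_and_return() and friends, hence ATOMIC_OPS() is
 * redefined above without ATOMIC_OP_RETURN().
 */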

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;
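
/*
 * The __aligned(8) keeps the counter on a 64-bit boundary, satisfying the
 * alignment requirement of LLOCKD/SCONDD (and of single-copy-atomic
 * LDD/STD) noted above.
 */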

#define ATOMIC64_INIT(a)	{ (a) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need an
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which broke the atomic64 self-test.
	 * In the inline asm version, the "memory" clobber is needed for the
	 * exact same reason: to tell gcc about the store.
	 *
	 * This is, however, not needed for the sibling atomic64_add() etc.
	 * since both load and store are explicitly done in inline asm. As
	 * long as the API is used for each access, gcc has no way to
	 * optimize away any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
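
/*
 * For illustration, the plain-C alternative the asm above replaces would be
 * (sketch only):
 *
 *	static inline void atomic64_set(atomic64_t *v, s64 a)
 *	{
 *		v->counter = a;		// no volatile: store may be elided
 *	}
 */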

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

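/*
 * atomic64_cmpxchg: if *ptr equals @expected, atomically store @new; either
 * way return the value that was read. Fully ordered (smp_mb() before and
 * after the LLSC sequence).
 */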
static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

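/*
 * atomic64_xchg: unconditionally store @new into *ptr and return the value
 * that was there before. Fully ordered.
 */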
static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
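
/*
 * Note: a return value < 0 means the counter was already <= 0 and was left
 * untouched; a return value >= 0 means the decrement was performed.
 */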

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
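
/*
 * The generic atomic64 code layers atomic64_add_unless() and
 * atomic64_inc_not_zero() on top of this by comparing the returned old
 * value against @u.
 */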

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif