/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc. OTOH, we don't
 * have to write any serious assembly. prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
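
/*
 * Illustrative example (not part of the interface): assuming
 * L1_CACHE_BYTES == 64 for the sake of the example, an atomic_t at
 * 0x1000 hashes to __atomic_hash[0] and one at 0x1040 to
 * __atomic_hash[1]; the index wraps modulo ATOMIC_HASH_SIZE, and two
 * atomics in the same cacheline always share a lock.
 */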

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
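
/*
 * Usage sketch (the pattern every helper below follows):
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... read-modify-write v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * On SMP this takes the hashed spinlock for v with interrupts disabled;
 * on UP it reduces to local_irq_save()/local_irq_restore().
 */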

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
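
/*
 * Example (illustrative): cmpxchg() returns the value v->counter held
 * before the exchange, so a caller can claim a 0 -> 1 transition with
 *
 *	if (atomic_cmpxchg(v, 0, 1) == 0)
 *		... this CPU performed the transition ...
 *
 * and any non-zero return means another CPU got there first.
 */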

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)
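
/*
 * Instantiating ATOMIC_OPS(add, +=) below generates atomic_add(),
 * atomic_add_return() and atomic_fetch_add(); ATOMIC_OPS(sub, -=)
 * generates the matching "sub" variants.
 */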

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)
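
/*
 * The bitwise ops have no *_return variant, so ATOMIC_OPS(and, &=)
 * only generates atomic_and() and atomic_fetch_and(), and likewise
 * for "or" and "xor".
 */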

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)
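
/*
 * As with the 32-bit macros above, ATOMIC64_OPS(add, +=) expands to
 * atomic64_add(), atomic64_add_return() and atomic64_fetch_add(), all
 * serialized by the same hashed spinlocks on SMP.
 */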

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic64_set_release(v, i)	atomic64_set((v), (i))

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */