Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

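/*
 * Illustrative sketch, not part of the original header: the generic atomic
 * fallback layer composes acquire variants roughly as below, pairing a
 * relaxed op defined later in this file with the fence above. The function
 * name example_atomic_add_return_acquire is hypothetical.
 */
#if 0	/* example only, never compiled */
static inline int example_atomic_add_return_acquire(int a, atomic_t *v)
{
	int ret = atomic_add_return_relaxed(a, v);

	__atomic_acquire_fence();	/* orders later accesses after the op */
	return ret;
}
#endif
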
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
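
/*
 * Illustrative note, not part of the original header: the ATOMIC_OPS(add, add)
 * instantiation above generates (via ATOMIC_OP) roughly the following LL/SC
 * loop. lwarx reserves the word, stwcx. stores only if the reservation still
 * holds, and bne- retries on failure.
 */
#if 0	/* example expansion only, never compiled */
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n"
"add %0,%2,%0\n"
"	stwcx.	%0,0,%3 \n"
"	bne-	1b\n"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
#endif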

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants so it
 * would be a surprise).
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_acquire	\n"
"	cmpw	0,%0,%3							\n"
"	bne-	2f							\n"
"	stwcx.	%4,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
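
/*
 * Illustrative sketch, not part of the original header: the kind of lock
 * fast path atomic_try_cmpxchg_lock() is intended for, where the lwarx lock
 * hint tells the core the reservation protects a lock word. The names
 * example_trylock and lock are hypothetical, not the kernel's real spinlock
 * implementation.
 */
#if 0	/* example only, never compiled */
static inline bool example_trylock(atomic_t *lock)
{
	int old = 0;

	/* 0 -> 1 transition with acquire semantics on success */
	return atomic_try_cmpxchg_lock(lock, &old, 1);
}
#endif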

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
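
/*
 * Illustrative sketch, not part of the original header: a typical use of
 * atomic_fetch_add_unless() is a "take a reference unless it already hit
 * zero" helper, as in the generic atomic_inc_not_zero() fallback. The names
 * example_get_unless_zero and refcount are hypothetical.
 */
#if 0	/* example only, never compiled */
static inline bool example_get_unless_zero(atomic_t *refcount)
{
	/* Adds 1 unless the counter is 0; the old value is returned. */
	return atomic_fetch_add_unless(refcount, 1, 0) != 0;
}
#endif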

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
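
/*
 * Illustrative sketch, not part of the original header: atomic_inc_not_zero()
 * is commonly used to pin an object found via a lookup only while someone
 * else still holds a reference to it. struct example_obj and
 * example_obj_tryget() are hypothetical names.
 */
#if 0	/* example only, never compiled */
struct example_obj {
	atomic_t users;
};

static inline struct example_obj *example_obj_tryget(struct example_obj *obj)
{
	/* Fails (returns NULL) if the last reference is already gone. */
	return atomic_inc_not_zero(&obj->users) ? obj : NULL;
}
#endif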

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
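
/*
 * Illustrative sketch, not part of the original header: atomic_dec_if_positive()
 * suits counting-semaphore style "take a token if any remain" logic, since it
 * only stores when the old value was greater than zero. The names
 * example_take_token and tokens are hypothetical.
 */
#if 0	/* example only, never compiled */
static inline bool example_take_token(atomic_t *tokens)
{
	/* Returns old value - 1; >= 0 means a token was actually taken. */
	return atomic_dec_if_positive(tokens) >= 0;
}
#endif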

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */