Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards.
The file shown below is arch/riscv/include/asm/atomic.h, the RISC-V atomic-operations header.

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
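
/*
 * RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER come from <asm/fence.h>;
 * on SMP builds they expand to an acquire fence ("fence r, rw") and a
 * release fence ("fence rw, w") respectively, and to nothing on !SMP.  The
 * generic _acquire/_release atomic variants are built by combining a
 * relaxed operation with one of these fences.
 */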

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
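
/*
 * atomic_read() and atomic_set() are plain, unordered accesses; they only
 * guarantee that the load/store itself is not torn.  A minimal illustrative
 * use (refs is a made-up counter, not part of this file):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);
 *	if (atomic_read(&refs) > 0)
 *		...
 */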

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}									\

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w, int,   )				\
        ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
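
/*
 * As a concrete sketch, ATOMIC_OPS(add, add, i) above generates, for the
 * 32-bit case, roughly:
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * i.e. a single AMO with no .aq/.rl ordering bits, with the old value
 * discarded into the zero register.
 */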

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
        return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
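
/*
 * For example, the 32-bit part of ATOMIC_OPS(add, add, +, i) above expands
 * roughly to:
 *
 *	int atomic_fetch_add_relaxed(int i, atomic_t *v)
 *		-- "amoadd.w %1, %2, %0", no ordering bits, returns old value
 *	int atomic_fetch_add(int i, atomic_t *v)
 *		-- "amoadd.w.aqrl %1, %2, %0", fully ordered, returns old value
 *	int atomic_add_return_relaxed(int i, atomic_t *v)
 *		{ return atomic_fetch_add_relaxed(i, v) + i; }
 *	int atomic_add_return(int i, atomic_t *v)
 *		{ return atomic_fetch_add(i, v) + i; }
 *
 * The _acquire/_release forms are then filled in generically from the
 * _relaxed ones using the fences defined at the top of this file.
 */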

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
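
/*
 * atomic_fetch_add_unless() returns the old value, so callers test it
 * against @u to see whether the add actually happened.  An illustrative
 * pattern, equivalent to what the generic atomic_inc_not_zero() does
 * (obj->refcnt is a made-up field used only for this sketch):
 *
 *	if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// was already zero, no reference taken
 */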

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)						\
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()

#define atomic_xchg_relaxed atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg_acquire
#define atomic_xchg_release atomic_xchg_release
#define atomic_xchg atomic_xchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#define atomic_cmpxchg_release atomic_cmpxchg_release
#define atomic_cmpxchg atomic_cmpxchg
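
/*
 * A minimal (illustrative) compare-and-swap loop built on the helpers
 * defined above; old, new and counter are hypothetical names:
 *
 *	int old = atomic_read(&counter);
 *	int new;
 *
 *	do {
 *		new = old * 2;
 *	} while (!atomic_try_cmpxchg(&counter, &old, new));
 *
 * atomic_try_cmpxchg() is supplied by the generic atomic fallbacks on top
 * of the atomic_cmpxchg() defined here; open-coded loops can also compare
 * atomic_cmpxchg(&counter, old, new) against old directly.
 */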

#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
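
/*
 * atomic_dec_if_positive() returns the value the counter would have after
 * the decrement; the store is only performed when that value is not
 * negative.  Illustrative use (sem_count is a made-up counter):
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;		// no decrement happened
 */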

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */