Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the OrangePi 5/5B/5+ boards. The file below is the kernel's generic spinlock-based implementation of 64-bit atomics (upstream lib/atomic64.c).

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
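
/*
 * Sizing sketch: assuming the common L1_CACHE_BYTES of 64, the padded
 * array above occupies 16 * 64 = 1 KiB no matter how small
 * sizeof(raw_spinlock_t) is; the char pad in the union rounds every
 * element up to a full cache line so no two locks ever share one.
 */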

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
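
/*
 * Worked example of the hash above, assuming L1_CACHE_SHIFT == 6
 * (64-byte lines): for an atomic64_t at address 0x1040,
 *
 *	0x1040 >> 6				= 0x41
 *	0x41 ^ (0x41 >> 8) ^ (0x41 >> 16)	= 0x41
 *	0x41 & (NR_LOCKS - 1)			= 1
 *
 * so that variable always maps to atomic64_lock[1]. Dropping the low
 * L1_CACHE_SHIFT bits first maps a whole cache line to one lock; the
 * xor folding mixes higher address bits into the 4-bit index.
 */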

s64 atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);
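
/*
 * Even a plain read takes the lock: on the 32-bit machines this file
 * targets, a 64-bit load is performed as two 32-bit loads, so an
 * unlocked read could observe half of a concurrent update. The
 * _irqsave variants also keep an interrupt handler that touches the
 * same hash bucket from deadlocking against the interrupted holder.
 */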

void atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);
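
/*
 * For reference, ATOMIC64_OP(add, +=), instantiated further down via
 * ATOMIC64_OPS(add, +=), expands to:
 *
 *	void atomic64_add(s64 a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(atomic64_add);
 */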

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);
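
/*
 * The two generators above differ only in which value they hand back:
 * a C compound assignment evaluates to the new value, so the _return
 * functions report the result, while the fetch_ functions save and
 * report the old value. With v->counter == 5, atomic64_add_return(3, v)
 * returns 8, atomic64_fetch_add(3, v) returns 5, and both leave the
 * counter at 8.
 */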

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
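
/*
 * The two instantiations above emit six exported functions:
 * atomic64_add(), atomic64_add_return(), atomic64_fetch_add(), and
 * the corresponding sub variants.
 */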

#undef ATOMIC64_OPS
/* and/or/xor have no _return variants in the generic atomic64 API */
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

s64 atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
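
/*
 * atomic64_dec_if_positive() returns the decremented value whether or
 * not it was stored: a return >= 0 means the counter really was
 * decremented, a negative return means it was left alone. Callers
 * typically use it as "take a reference only if one is available".
 */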

s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
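
/*
 * Usage sketch (hypothetical caller, not part of this file): cmpxchg
 * returns the value it found, so the usual pattern is a retry loop,
 * here a bounded increment where LIMIT stands for whatever bound the
 * caller enforces:
 *
 *	s64 old = atomic64_read(v), seen;
 *
 *	while (old < LIMIT) {
 *		seen = atomic64_cmpxchg(v, old, old + 1);
 *		if (seen == old)
 *			break;		// our update won
 *		old = seen;		// lost a race; retry with new value
 *	}
 */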

s64 atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);
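
/*
 * atomic64_fetch_add_unless() always returns the old value and adds a
 * only when that value is not u. A common pattern built on it (sketch,
 * using only functions from this file) is "increment unless already
 * zero" for refcounts:
 *
 *	if (atomic64_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// object already dying; no ref taken
 *
 * where obj->refs is a hypothetical atomic64_t refcount: the increment
 * happened iff the returned old value was nonzero.
 */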