Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_REFCOUNT_H
#define _TOOLS_LINUX_REFCOUNT_H

/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed w.r.t. regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before them; they also provide a control dependency,
 * which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
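/*
 * Illustrative sketch (assumption, not part of this header): an object that
 * embeds a refcount_t and starts life with one reference held by its creator.
 * 'struct foo' and foo_init() are hypothetical names used only for the example.
 *
 *	struct foo {
 *		refcount_t refs;
 *		int payload;
 *	};
 *
 *	static struct foo static_foo = {
 *		.refs = REFCOUNT_INIT(1),	// static object, one initial reference
 *	};
 *
 *	void foo_init(struct foo *p)
 *	{
 *		refcount_set(&p->refs, 1);	// creator holds the first reference
 *	}
 */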

#include <linux/atomic.h>
#include <linux/kernel.h>

/*
 * With NDEBUG the checks below evaluate their condition but are otherwise
 * no-ops; without it a failed check is fatal (BUG_ON) and callers must use
 * the boolean results (__must_check).
 */
#ifdef NDEBUG
#define REFCOUNT_WARN(cond, str) (void)(cond)
#define __refcount_check
#else
#define REFCOUNT_WARN(cond, str) BUG_ON(cond)
#define __refcount_check	__must_check
#endif

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);
}

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

/*
 * Similar to atomic_inc_not_zero(); will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
static inline __refcount_check
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		/* A zero refcount means the object is being torn down; don't take a reference. */
		if (!val)
			return false;

		/* val was UINT_MAX: the counter is saturated, leave it pinned and report success. */
		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost the race; retry with the value another CPU installed. */
		val = old;
	}

	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
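/*
 * Illustrative sketch (assumption, not part of this header): a lookup that
 * only returns the object if a reference could still be taken. The table
 * helper, the caller-provided stability (lock or RCU) and 'struct foo' are
 * hypothetical.
 *
 *	struct foo *foo_lookup(int key)
 *	{
 *		struct foo *p = table_find(key);	// object kept stable by the caller's lock/RCU
 *
 *		if (p && !refcount_inc_not_zero(&p->refs))
 *			p = NULL;			// refcount already hit zero; object is going away
 *		return p;
 *	}
 */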

/*
 * Similar to atomic_inc(); will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object, and it will WARN when this is not so.
 */
static inline void refcount_inc(refcount_t *r)
{
	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
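/*
 * Illustrative sketch (assumption): taking an extra reference while one is
 * already held, e.g. before publishing a second pointer to the object.
 * registry_add() is a hypothetical helper.
 *
 *	void foo_register(struct foo *p)	// caller already holds a reference
 *	{
 *		refcount_inc(&p->refs);		// the registry now owns its own reference
 *		registry_add(p);
 *	}
 */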

/*
 * Similar to atomic_dec_and_test(); it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before it, and provides a control dependency such that free() must come
 * after. See the comment on top.
 */
static inline __refcount_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		/* A saturated counter is pinned at UINT_MAX and is never dropped. */
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		/* Unsigned wrap-around: asked to drop more references than are held. */
		if (new > val) {
			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		/* Lost the race; retry with the value another CPU installed. */
		val = old;
	}

	return !new;
}

static inline __refcount_check
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
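/*
 * Illustrative sketch (assumption, not part of this header): the matching
 * 'put' that frees the object on the final reference drop. foo_free() is a
 * hypothetical destructor.
 *
 *	void foo_put(struct foo *p)
 *	{
 *		// release ordering: our prior stores are visible before the free
 *		if (refcount_dec_and_test(&p->refs))
 *			foo_free(p);
 *	}
 */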

#endif /* _TOOLS_LINUX_REFCOUNT_H */