// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't try
 * to detect the ref hitting 0, which means that get/put can just increment or
 * decrement the local counter. Note that the counter on a particular cpu can
 * (and will) wrap - this is fine: when we go to shut down, the percpu counters
 * will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu (atomic) mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(). Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
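
/*
 * Minimal usage sketch of the lifecycle described above (illustrative only;
 * struct gadget, gadget_release(), gadget_create() and gadget_free() are
 * hypothetical names, not part of this file). The embedding object holds the
 * initial reference taken by percpu_ref_init(); ordinary users pair
 * percpu_ref_get()/percpu_ref_put(); teardown drops the initial reference
 * with percpu_ref_kill(), and the release callback runs once the count
 * reaches zero:
 *
 *	struct gadget {
 *		struct percpu_ref	ref;
 *		...
 *	};
 *
 *	static void gadget_release(struct percpu_ref *ref)
 *	{
 *		struct gadget *g = container_of(ref, struct gadget, ref);
 *
 *		percpu_ref_exit(&g->ref);
 *		kfree(g);
 *	}
 *
 *	static struct gadget *gadget_create(gfp_t gfp)
 *	{
 *		struct gadget *g = kzalloc(sizeof(*g), gfp);
 *
 *		if (!g)
 *			return NULL;
 *		if (percpu_ref_init(&g->ref, gadget_release, 0, gfp)) {
 *			kfree(g);
 *			return NULL;
 *		}
 *		return g;
 *	}
 *
 *	static void gadget_free(struct gadget *g)
 *	{
 *		percpu_ref_kill(&g->ref);
 *	}
 */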

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
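
/*
 * Sketch of the flag variants (illustrative; obj and obj_release() are
 * hypothetical names). PERCPU_REF_INIT_ATOMIC starts the ref counted but in
 * (slower) atomic mode, and it stays atomic until an explicit
 * percpu_ref_switch_to_percpu(). PERCPU_REF_INIT_DEAD starts it dead with a
 * count of 0, to be brought live later with percpu_ref_reinit(). Both imply
 * that re-initialization is allowed:
 *
 *	err = percpu_ref_init(&obj->ref, obj_release,
 *			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *
 *	err = percpu_ref_init(&obj->ref, obj_release,
 *			      PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 */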

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
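
/*
 * Sketch of the init-failure path mentioned above (illustrative; the names
 * are hypothetical): if a later step of setting up the embedding object
 * fails, percpu_ref_exit() undoes percpu_ref_init() directly, since the ref
 * was never published and nobody else can hold a reference:
 *
 *	err = percpu_ref_init(&obj->ref, obj_release, 0, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 *	err = obj_setup_rest(obj);
 *	if (err) {
 *		percpu_ref_exit(&obj->ref);
 *		return err;
 *	}
 */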

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	WARN_ONCE(atomic_long_read(&data->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  data->release, atomic_long_read(&data->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}
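
/*
 * A worked illustration of the bias arithmetic above (numbers are made up).
 * In percpu mode the atomic counter holds PERCPU_COUNT_BIAS + 1 (the initial
 * ref) while gets/puts only touch the percpu counters. Suppose three CPUs
 * saw 5 gets and 3 puts in total:
 *
 *	cpu0: 3 gets		-> percpu delta +3
 *	cpu1: 2 gets, 1 put	-> percpu delta +1
 *	cpu2: 2 puts		-> percpu delta -2 (stored wrapped, mod 2^BITS_PER_LONG)
 *
 *	count = 3 + 1 - 2 = 2 (the wrap cancels out in the modular sum)
 *	atomic count after the add: (BIAS + 1) + (count - BIAS) = 1 + 2 = 3
 *
 * Any put that lands on the atomic counter before this point only brings it
 * down from roughly BIAS, so it cannot reach 0 until the bias is subtracted
 * together with the percpu sum.
 */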

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
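
/*
 * Sketch of an asynchronous switch with a confirmation callback
 * (illustrative; obj, obj_confirm_atomic() and obj->atomic_done are
 * hypothetical). The callback must not block; completing a completion from
 * it is a common way to let a sleeping caller know the switch is visible on
 * all CPUs:
 *
 *	static void obj_confirm_atomic(struct percpu_ref *ref)
 *	{
 *		struct obj *obj = container_of(ref, struct obj, ref);
 *
 *		complete(&obj->atomic_done);
 *	}
 *
 *	reinit_completion(&obj->atomic_done);
 *	percpu_ref_switch_to_atomic(&obj->ref, obj_confirm_atomic);
 *	wait_for_completion(&obj->atomic_done);
 */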

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
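
/*
 * Sketch of a round trip through atomic mode (illustrative; obj is
 * hypothetical). Switching to atomic mode synchronously and back again can
 * give a caller a quiescent point at which no CPU is doing percpu gets/puts,
 * e.g. to read a stable view of the count for debugging:
 *
 *	percpu_ref_switch_to_atomic_sync(&obj->ref);
 *	pr_debug("obj ref is zero: %d\n", percpu_ref_is_zero(&obj->ref));
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */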

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
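
/*
 * Sketch of a confirmed kill (illustrative; obj, obj_kill_confirmed() and
 * obj->teardown_work are hypothetical). Once @confirm_kill runs, no CPU can
 * succeed in percpu_ref_tryget_live() any more, so it is a safe point to
 * kick off the next teardown stage. The callback may run from RCU callback
 * context, so it must not block; deferring to a workqueue is typical:
 *
 *	static void obj_kill_confirmed(struct percpu_ref *ref)
 *	{
 *		struct obj *obj = container_of(ref, struct obj, ref);
 *
 *		schedule_work(&obj->teardown_work);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, obj_kill_confirmed);
 */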

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
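
/*
 * Sketch of re-initialization (illustrative; obj and obj_release() are
 * hypothetical). A ref created with PERCPU_REF_INIT_DEAD, or one that has
 * been killed and dropped to zero with PERCPU_REF_ALLOW_REINIT set at init
 * time, can be brought back to a live count of 1 before the object is
 * published again:
 *
 *	err = percpu_ref_init(&obj->ref, obj_release,
 *			      PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 *	...
 *	percpu_ref_reinit(&obj->ref);
 */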

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
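
/*
 * Sketch of a freeze/unfreeze style use of kill + resurrect (illustrative;
 * obj, obj->drain_wq and the helpers are hypothetical, loosely modeled on
 * how a request-queue usage counter is frozen and unfrozen). The ref must
 * have been initialized with PERCPU_REF_ALLOW_REINIT (or a flag that implies
 * it) so the percpu counters survive the kill. The release callback only
 * wakes waiters, so the object itself is not freed and can be brought back
 * with percpu_ref_resurrect():
 *
 *	static void obj_usage_release(struct percpu_ref *ref)
 *	{
 *		struct obj *obj = container_of(ref, struct obj, usage);
 *
 *		wake_up_all(&obj->drain_wq);
 *	}
 *
 *	static void obj_freeze(struct obj *obj)
 *	{
 *		percpu_ref_kill(&obj->usage);
 *		wait_event(obj->drain_wq, percpu_ref_is_zero(&obj->usage));
 *	}
 *
 *	static void obj_unfreeze(struct obj *obj)
 *	{
 *		percpu_ref_resurrect(&obj->usage);
 *	}
 */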