^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Sleepable Read-Copy Update mechanism for mutual exclusion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) IBM Corporation, 2006
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) Fujitsu, 2012
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Authors: Paul McKenney <paulmck@linux.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Lai Jiangshan <laijs@cn.fujitsu.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * For detailed explanation of Read-Copy Update mechanism see -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Documentation/RCU/ *.txt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #define pr_fmt(fmt) "rcu: " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/preempt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/rcupdate_wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/srcu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "rcu.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include "rcu_segcblist.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) /* Holdoff in nanoseconds for auto-expediting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) module_param(exp_holdoff, ulong, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static ulong counter_wrap_check = (ULONG_MAX >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) module_param(counter_wrap_check, ulong, 0444);
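
/*
 * Both module parameters are mode 0444, so they cannot be changed at
 * run time and are instead given on the kernel command line.  An
 * illustrative (not prescriptive) example, assuming the usual
 * "srcutree." prefix for this file's built-in module parameters:
 *
 *	srcutree.exp_holdoff=50000 srcutree.counter_wrap_check=255
 *
 * The first raises the auto-expedite holdoff from 25 to 50
 * microseconds; the second shrinks the overflow-check mask so that the
 * counter-wrap check in srcu_gp_end() runs far more often than with
 * the default of ULONG_MAX >> 2.
 */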
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /* Early-boot callback-management, so early that no lock is required! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) static LIST_HEAD(srcu_boot_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static bool __read_mostly srcu_init_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) static void srcu_invoke_callbacks(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static void process_srcu(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) static void srcu_delay_timer(struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define spin_lock_rcu_node(p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) spin_lock(&ACCESS_PRIVATE(p, lock)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) smp_mb__after_unlock_lock(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define spin_lock_irq_rcu_node(p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) smp_mb__after_unlock_lock(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define spin_unlock_irq_rcu_node(p) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define spin_lock_irqsave_rcu_node(p, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) smp_mb__after_unlock_lock(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define spin_unlock_irqrestore_rcu_node(p, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * Initialize SRCU combining tree. Note that statically allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * srcu_struct structures might already have srcu_read_lock() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * srcu_read_unlock() running against them. So if the is_static parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) int level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) int levelspread[RCU_NUM_LVLS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) struct srcu_node *snp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct srcu_node *snp_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) /* Initialize geometry if it has not already been initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) rcu_init_geometry();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) /* Work out the overall tree geometry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) ssp->level[0] = &ssp->node[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) for (i = 1; i < rcu_num_lvls; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) rcu_init_levelspread(levelspread, num_rcu_lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* Each pass through this loop initializes one srcu_node structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) srcu_for_each_node_breadth_first(ssp, snp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) spin_lock_init(&ACCESS_PRIVATE(snp, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) ARRAY_SIZE(snp->srcu_data_have_cbs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) snp->srcu_have_cbs[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) snp->srcu_data_have_cbs[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) snp->srcu_gp_seq_needed_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) snp->grplo = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) snp->grphi = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) if (snp == &ssp->node[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /* Root node, special case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) snp->srcu_parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /* Non-root node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (snp == ssp->level[level + 1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) level++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) snp->srcu_parent = ssp->level[level - 1] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) (snp - ssp->level[level]) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) levelspread[level - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * Initialize the per-CPU srcu_data array, which feeds into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * leaves of the srcu_node tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) */
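	/* ARRAY_SIZE() examines only the type, so sdp need not be initialized yet. */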
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) ARRAY_SIZE(sdp->srcu_unlock_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) level = rcu_num_lvls - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) snp_first = ssp->level[level];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) sdp = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) rcu_segcblist_init(&sdp->srcu_cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) sdp->srcu_cblist_invoking = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) sdp->mynode = &snp_first[cpu / levelspread[level]];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (snp->grplo < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) snp->grplo = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) snp->grphi = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) sdp->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) INIT_WORK(&sdp->work, srcu_invoke_callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) sdp->ssp = ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) if (is_static)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /* Dynamically allocated, so there had better be no srcu_read_lock() calls yet! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) sdp->srcu_lock_count[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) sdp->srcu_unlock_count[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) }
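
/*
 * Illustrative geometry example (the CPU count below is hypothetical):
 * with nr_cpu_ids = 64 and the default RCU_FANOUT_LEAF of 16,
 * init_srcu_struct_nodes() builds four leaf srcu_node structures under
 * a single root.  Each leaf then covers 16 srcu_data structures, so
 * CPU 37 maps to leaf 37 / 16 = 2, that leaf's ->grplo and ->grphi end
 * up as 32 and 47, and CPU 37's ->grpmask is 1 << (37 - 32) = 0x20.
 */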
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * Initialize non-compile-time initialized fields, including the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * associated srcu_node and srcu_data structures. The is_static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * parameter is passed through to init_srcu_struct_nodes(), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * also tells us that ->sda has already been wired up to srcu_data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) mutex_init(&ssp->srcu_cb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) mutex_init(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) ssp->srcu_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) ssp->srcu_gp_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) ssp->srcu_barrier_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) mutex_init(&ssp->srcu_barrier_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) INIT_DELAYED_WORK(&ssp->work, process_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (!is_static)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) ssp->sda = alloc_percpu(struct srcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) init_srcu_struct_nodes(ssp, is_static);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) ssp->srcu_gp_seq_needed_exp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) return ssp->sda ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) struct lock_class_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /* Don't re-initialize a lock while it is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) lockdep_init_map(&ssp->dep_map, name, key, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return init_srcu_struct_fields(ssp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) EXPORT_SYMBOL_GPL(__init_srcu_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * init_srcu_struct - initialize a sleep-RCU structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * @ssp: structure to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * Must invoke this on a given srcu_struct before passing that srcu_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * to any other function. Each srcu_struct represents a separate domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * of SRCU protection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) int init_srcu_struct(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) return init_srcu_struct_fields(ssp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) EXPORT_SYMBOL_GPL(init_srcu_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
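
/*
 * Illustrative usage sketch for a dynamically allocated SRCU domain.
 * The my_dev structure and the two functions below are hypothetical,
 * not part of this file:
 *
 *	struct my_dev {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		return init_srcu_struct(&dev->srcu);
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		cleanup_srcu_struct(&dev->srcu);
 *	}
 *
 * with the caveat (see the cleanup_srcu_struct() header below) that all
 * readers and callbacks must have finished before the teardown runs.
 * Statically allocated domains instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() and rely on the first-use initialization
 * performed by check_init_srcu_struct() below.
 */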
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * First-use initialization of statically allocated srcu_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * structure. Wiring up the combining tree is more than can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * done with compile-time initialization, so this check is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * to each update-side SRCU primitive. Use ssp->lock, which -is-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * compile-time initialized, to resolve races involving multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * CPUs trying to garner first-use privileges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static void check_init_srcu_struct(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) /* The smp_load_acquire() pairs with the smp_store_release(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return; /* Already initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) spin_lock_irqsave_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) spin_unlock_irqrestore_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) init_srcu_struct_fields(ssp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) spin_unlock_irqrestore_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * Returns approximate total of the readers' ->srcu_lock_count[] values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * for the rank of per-CPU counters specified by idx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) unsigned long sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * Returns approximate total of the readers' ->srcu_unlock_count[] values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * for the rank of per-CPU counters specified by idx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) unsigned long sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) return sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * Return true if the number of pre-existing readers is determined to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * be zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) unsigned long unlocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) unlocks = srcu_readers_unlock_idx(ssp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * Make sure that a lock is always counted if the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * unlock is counted. Needs to be a smp_mb() as the read side may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * contain a read from a variable that is written to before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * synchronize_srcu() in the write side. In this case smp_mb()s
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * A and B act like the store buffering pattern.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * This smp_mb() also pairs with smp_mb() C to prevent accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * after the synchronize_srcu() from being executed before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * grace period ends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) smp_mb(); /* A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * If the locks are the same as the unlocks, then there must have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * been no readers on this index at some time in between. This does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * not mean that there are no more readers, as one could have read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * the current index but not have incremented the lock counter yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * So suppose that the updater is preempted here for so long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * that more than ULONG_MAX non-nested readers come and go in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * the meantime. It turns out that this cannot result in overflow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * because if a reader modifies its unlock count after we read it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * above, then that reader's next load of ->srcu_idx is guaranteed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * to get the new value, which will cause it to operate on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * other bank of counters, where it cannot contribute to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * overflow of these counters. This means that there is a maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * of 2*NR_CPUS increments, which cannot overflow given current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) * systems, especially not on 64-bit systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * OK, how about nesting? This does impose a limit on nesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * especially on 64-bit systems.
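	 *
	 * As an illustrative worked example (the CPU count is
	 * hypothetical): with NR_CPUS=4096 on a 64-bit system, that
	 * nesting limit is floor(ULONG_MAX / 4096 / 2), or roughly
	 * 2.25e15 levels of nesting, far deeper than any real call
	 * stack could ever reach.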
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return srcu_readers_lock_idx(ssp, idx) == unlocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * srcu_readers_active - returns true if there are readers, and false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * Note that this is not an atomic primitive, and can therefore suffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * severe errors when invoked on an active srcu_struct. That said, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * can be useful as an error check at cleanup time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) static bool srcu_readers_active(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) unsigned long sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) sum += READ_ONCE(cpuc->srcu_lock_count[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) sum += READ_ONCE(cpuc->srcu_lock_count[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) return sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #define SRCU_INTERVAL 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * Return grace-period delay, zero if there are expedited grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * periods pending, SRCU_INTERVAL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) static unsigned long srcu_get_delay(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return SRCU_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * cleanup_srcu_struct - deconstruct a sleep-RCU structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * @ssp: structure to clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * Must invoke this after you are finished using a given srcu_struct that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * was initialized via init_srcu_struct(), else you leak memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) void cleanup_srcu_struct(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (WARN_ON(!srcu_get_delay(ssp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) return; /* Just leak it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (WARN_ON(srcu_readers_active(ssp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return; /* Just leak it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) flush_delayed_work(&ssp->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) del_timer_sync(&sdp->delay_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) flush_work(&sdp->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return; /* Forgot srcu_barrier(), so just leak it! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) WARN_ON(srcu_readers_active(ssp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) pr_info("%s: Active srcu_struct %p state: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return; /* Caller forgot to stop doing call_srcu()? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) free_percpu(ssp->sda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ssp->sda = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * Counts the new reader in the appropriate per-CPU element of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * srcu_struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * Returns an index that must be passed to the matching srcu_read_unlock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) int __srcu_read_lock(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) idx = READ_ONCE(ssp->srcu_idx) & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) smp_mb(); /* B */ /* Avoid leaking the critical section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) EXPORT_SYMBOL_GPL(__srcu_read_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * Removes the count for the old reader from the appropriate per-CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * element of the srcu_struct. Note that this may well be a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * CPU than that which was incremented by the corresponding srcu_read_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) smp_mb(); /* C */ /* Avoid leaking the critical section. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) EXPORT_SYMBOL_GPL(__srcu_read_unlock);
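
/*
 * Illustrative read-side sketch; my_srcu, gp, struct foo, and
 * do_something_with() are hypothetical.  Callers normally use the
 * srcu_read_lock() and srcu_read_unlock() wrappers from
 * include/linux/srcu.h, which invoke the two functions above and carry
 * the returned index across the critical section:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * Unlike an rcu_read_lock() critical section, this one is allowed to
 * block, which is why the index must be handed back to the matching
 * srcu_read_unlock().
 */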
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * We use an adaptive strategy for synchronize_srcu() and especially for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * synchronize_srcu_expedited(). We spin for a fixed time period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * (defined below) to allow SRCU readers to exit their read-side critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * sections. If there are still some readers after a few microseconds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * we repeatedly block for 1-millisecond time periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) #define SRCU_RETRY_CHECK_DELAY 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * Start an SRCU grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static void srcu_gp_start(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) rcu_segcblist_advance(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) rcu_seq_current(&ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) rcu_seq_snap(&ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) rcu_seq_start(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) state = rcu_seq_state(ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
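
/*
 * A note on the sequence-number layout used above (see rcu_seq_start()
 * and friends in kernel/rcu/rcu.h): the low two bits of ->srcu_gp_seq
 * hold the grace-period state and the remaining bits count grace
 * periods.  For example, the value 0x15 decodes to grace period
 * 0x15 >> 2 = 5 in state SRCU_STATE_SCAN1 (0x15 & 0x3 == 1), which is
 * exactly the state that srcu_gp_start() leaves behind.
 */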
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static void srcu_delay_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) if (!delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) timer_reduce(&sdp->delay_work, jiffies + delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * Schedule callback invocation for the specified srcu_data structure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * if possible, on the corresponding CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) srcu_queue_delayed_work_on(sdp, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * Schedule callback invocation for all srcu_data structures associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * with the specified srcu_node structure that have callbacks for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) * just-completed grace period, the one corresponding to idx. If possible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * schedule this invocation on the corresponding CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned long mask, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (!(mask & (1 << (cpu - snp->grplo))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * Note the end of an SRCU grace period. Initiates callback invocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * and starts a new grace period if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * The ->srcu_cb_mutex acquisition does not protect any data, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * instead prevents more than one grace period from starting while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * are initiating callback invocation. This allows the ->srcu_have_cbs[]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * array to have a finite number of elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) static void srcu_gp_end(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) unsigned long cbdelay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) bool cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) bool last_lvl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) unsigned long gpseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) struct srcu_node *snp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) /* Prevent more than one additional grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) mutex_lock(&ssp->srcu_cb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /* End the current grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) spin_lock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) idx = rcu_seq_state(ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) cbdelay = srcu_get_delay(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) rcu_seq_end(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) mutex_unlock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) /* A new grace period can start at this point. But only one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /* Initiate callback invocation as needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) srcu_for_each_node_breadth_first(ssp, snp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) spin_lock_irq_rcu_node(snp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) cbs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if (last_lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) cbs = snp->srcu_have_cbs[idx] == gpseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) snp->srcu_have_cbs[idx] = gpseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) mask = snp->srcu_data_have_cbs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) snp->srcu_data_have_cbs[idx] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) spin_unlock_irq_rcu_node(snp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /* Occasionally prevent srcu_data counter wrap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (!(gpseq & counter_wrap_check) && last_lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) sdp = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) spin_lock_irqsave_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (ULONG_CMP_GE(gpseq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) sdp->srcu_gp_seq_needed + 100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) sdp->srcu_gp_seq_needed = gpseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (ULONG_CMP_GE(gpseq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) sdp->srcu_gp_seq_needed_exp + 100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) sdp->srcu_gp_seq_needed_exp = gpseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) spin_unlock_irqrestore_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /* Callback initiation done, allow grace periods after next. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) mutex_unlock(&ssp->srcu_cb_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* Start a new grace period if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) spin_lock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) if (!rcu_seq_state(gpseq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) srcu_gp_start(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) srcu_reschedule(ssp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) * Funnel-locking scheme to scalably mediate many concurrent expedited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * grace-period requests. This function is invoked for the first known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * expedited request for a grace period that has already been requested,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * but without expediting. To start a completely new grace period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) * whether expedited or not, use srcu_funnel_gp_start() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) unsigned long s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) for (; snp != NULL; snp = snp->srcu_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) spin_lock_irqsave_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) spin_unlock_irqrestore_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) spin_unlock_irqrestore_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) spin_lock_irqsave_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) spin_unlock_irqrestore_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) * Funnel-locking scheme to scalably mediate many concurrent grace-period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * requests. The winner has to do the work of actually starting grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * period s. Losers must either ensure that their desired grace-period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * number is recorded on at least their leaf srcu_node structure, or they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * must take steps to invoke their own callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * Note that this function also does the work of srcu_funnel_exp_start(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * in some cases by directly invoking it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) unsigned long s, bool do_norm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) struct srcu_node *snp = sdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) unsigned long snp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /* Each pass through the loop does one level of the srcu_node tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) for (; snp != NULL; snp = snp->srcu_parent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return; /* GP already done and CBs recorded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) spin_lock_irqsave_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) snp_seq = snp->srcu_have_cbs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (snp == sdp->mynode && snp_seq == s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) spin_unlock_irqrestore_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) if (snp == sdp->mynode && snp_seq != s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) srcu_schedule_cbs_sdp(sdp, do_norm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) ? SRCU_INTERVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (!do_norm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) srcu_funnel_exp_start(ssp, snp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) snp->srcu_have_cbs[idx] = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (snp == sdp->mynode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) spin_unlock_irqrestore_rcu_node(snp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) /* Top of tree, must ensure the grace period will be started. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) spin_lock_irqsave_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * Record need for grace period s. Pair with load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * acquire setting up for initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /* If grace period not already done and none in progress, start it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) srcu_gp_start(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (likely(srcu_init_done))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) queue_delayed_work(rcu_gp_wq, &ssp->work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) srcu_get_delay(ssp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) else if (list_empty(&ssp->work.work.entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) list_add(&ssp->work.work.entry, &srcu_boot_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) spin_unlock_irqrestore_rcu_node(ssp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * Wait until all readers counted by array index idx complete, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * loop an additional time if there is an expedited grace period pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * The caller must ensure that ->srcu_idx is not changed while checking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (srcu_readers_active_idx_check(ssp, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) return true;
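		/*
		 * srcu_get_delay() returns zero only when an expedited
		 * grace period is pending, so the "+ !srcu_get_delay()"
		 * term below grants the expedited case exactly one
		 * extra busy-wait retry before giving up.
		 */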
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (--trycount + !srcu_get_delay(ssp) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) udelay(SRCU_RETRY_CHECK_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * Increment the ->srcu_idx counter so that future SRCU readers will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * us to wait for pre-existing readers in a starvation-free manner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static void srcu_flip(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * Ensure that if this updater saw a given reader's increment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * from __srcu_read_lock(), that reader was using an old value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * of ->srcu_idx. Also ensure that if a given reader sees the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * new value of ->srcu_idx, this updater's earlier scans cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * have seen that reader's increments (which is OK, because this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * grace period need not wait on that reader).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) smp_mb(); /* E */ /* Pairs with B and C. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * Ensure that if the updater misses an __srcu_read_unlock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * increment, that task's next __srcu_read_lock() will see the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * above counter update. Note that both this memory barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * and the one in srcu_readers_active_idx_check() provide the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * guarantee for __srcu_read_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) smp_mb(); /* D */ /* Pairs with C. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * If SRCU is likely idle, return true, otherwise return false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * Note that it is OK for several concurrent from-idle requests for a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * grace period to specify expediting, because they will all end up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * requesting the same grace period anyhow.  So there is no loss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * Note also that if any CPU (including the current one) is still invoking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * callbacks, this function will nevertheless say "idle". This is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * ideal, but the overhead of checking all CPUs' callback lists is even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * less ideal, especially on large systems. Furthermore, the wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * can happen before the callback is fully removed, so we have no choice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * but to accept this type of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * This function is also subject to counter-wrap errors, but let's face
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * it, if this function was preempted for enough time for the counters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * to wrap, it really doesn't matter whether or not we expedite the grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * period. The extra overhead of a needlessly expedited grace period is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * negligible when amortized over that time period, and the extra latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * of a needlessly non-expedited grace period is similarly negligible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static bool srcu_might_be_idle(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned long curseq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) unsigned long t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) unsigned long tlast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) check_init_srcu_struct(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /* If the local srcu_data structure has callbacks, not idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) sdp = raw_cpu_ptr(ssp->sda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) spin_lock_irqsave_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_unlock_irqrestore_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return false; /* Callbacks already present, so not idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) spin_unlock_irqrestore_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * No local callbacks, so probabilistically probe global state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * Exact information would require acquiring locks, which would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * kill scalability, hence the probabilistic nature of the probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* First, see if enough time has passed since the last GP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) t = ktime_get_mono_fast_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) tlast = READ_ONCE(ssp->srcu_last_gp_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (exp_holdoff == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) time_in_range_open(t, tlast, tlast + exp_holdoff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return false; /* Too soon after last GP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* Next, check for probable idleness. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) curseq = rcu_seq_current(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return false; /* Grace period in progress, so not idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) smp_mb(); /* Order ->srcu_gp_seq with prior access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return false; /* GP # changed, so not idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return true; /* With reasonable probability, idle! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * SRCU callback function to leak a callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static void srcu_leak_callback(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * Start an SRCU grace period if needed, and also enqueue the callback if rhp is non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct rcu_head *rhp, bool do_norm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) bool needexp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) bool needgp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) unsigned long s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) check_init_srcu_struct(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) idx = srcu_read_lock(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) sdp = raw_cpu_ptr(ssp->sda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_lock_irqsave_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rcu_segcblist_advance(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) rcu_seq_current(&ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) s = rcu_seq_snap(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) sdp->srcu_gp_seq_needed = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) needgp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) sdp->srcu_gp_seq_needed_exp = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) needexp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) spin_unlock_irqrestore_rcu_node(sdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (needgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) srcu_funnel_gp_start(ssp, sdp, s, do_norm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) else if (needexp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) srcu_funnel_exp_start(ssp, sdp->mynode, s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) srcu_read_unlock(ssp, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Enqueue an SRCU callback on the srcu_data structure associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * the current CPU and the specified srcu_struct structure, initiating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * grace-period processing if it is not already running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Note that all CPUs must agree that the grace period extended beyond
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * all pre-existing SRCU read-side critical sections.  On systems with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * more than one CPU, this means that when "func()" is invoked, each CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * is guaranteed to have executed a full memory barrier since the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * its last corresponding SRCU read-side critical section whose beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * preceded the call to call_srcu(). It also means that each CPU executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * an SRCU read-side critical section that continues beyond the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * "func()" must have executed a memory barrier after the call_srcu()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * but before the beginning of that SRCU read-side critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Note that these guarantees include CPUs that are offline, idle, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * executing in user mode, as well as CPUs that are executing in the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * resulting SRCU callback function "func()", then both CPU A and CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * B are guaranteed to execute a full memory barrier during the time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * interval between the call to call_srcu() and the invocation of "func()".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * This guarantee applies even if CPU A and CPU B are the same CPU (but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * again only if the system has more than one CPU).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Of course, these guarantees apply only for invocations of call_srcu(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * srcu_struct structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) rcu_callback_t func, bool do_norm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (debug_rcu_head_queue(rhp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Probable double call_srcu(), so leak the callback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) WRITE_ONCE(rhp->func, srcu_leak_callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rhp->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * call_srcu() - Queue a callback for invocation after an SRCU grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * @ssp: srcu_struct on which to queue the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * @rhp: structure to be used for queueing the SRCU callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * @func: function to be invoked after the SRCU grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * The callback function will be invoked some time after a full SRCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * grace period elapses, in other words after all pre-existing SRCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * read-side critical sections have completed. However, the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * function might well execute concurrently with other SRCU read-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * critical sections that started after call_srcu() was invoked. SRCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * read-side critical sections are delimited by srcu_read_lock() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * srcu_read_unlock(), and may be nested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * The callback will be invoked from process context, but must nevertheless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * be fast and must not block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) rcu_callback_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) __call_srcu(ssp, rhp, func, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) EXPORT_SYMBOL_GPL(call_srcu);
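
/*
 * Illustrative usage sketch (not part of this file): deferring the free
 * of an SRCU-protected structure via call_srcu().  The names my_srcu,
 * my_lock, gp, struct foo, and foo_reclaim() below are hypothetical and
 * exist only for this example.
 *
 *	DEFINE_SRCU(my_srcu);
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// Updater, with my_lock held: publish the new version, then
 *	// defer freeing the old one until all pre-existing SRCU
 *	// readers of my_srcu have completed.
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	call_srcu(&my_srcu, &old->rh, foo_reclaim);
 */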
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct rcu_synchronize rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) lock_is_held(&rcu_bh_lock_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) lock_is_held(&rcu_lock_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) lock_is_held(&rcu_sched_lock_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) check_init_srcu_struct(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) init_completion(&rcu.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) init_rcu_head_on_stack(&rcu.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) wait_for_completion(&rcu.completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) destroy_rcu_head_on_stack(&rcu.head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Make sure that later code is ordered after the SRCU grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * period. This pairs with the spin_lock_irq_rcu_node()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * because the current CPU might have been totally uninvolved with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * (and thus unordered against) that grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * synchronize_srcu_expedited - Brute-force SRCU grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * @ssp: srcu_struct with which to synchronize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Wait for an SRCU grace period to elapse, but be more aggressive about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * spinning rather than blocking when waiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Note that synchronize_srcu_expedited() has the same deadlock and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * memory-ordering properties as does synchronize_srcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) void synchronize_srcu_expedited(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) __synchronize_srcu(ssp, rcu_gp_is_normal());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * synchronize_srcu - wait for prior SRCU read-side critical-section completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * @ssp: srcu_struct with which to synchronize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Wait for the counts of both index values to drain to zero.  To avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * starving synchronize_srcu(), it first waits for the count of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * index=((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * and waits for the count of the other index to drain to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * Can block; must be called from process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Note that it is illegal to call synchronize_srcu() from the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * SRCU read-side critical section; doing so will result in deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * However, it is perfectly legal to call synchronize_srcu() on one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * srcu_struct from some other srcu_struct's read-side critical section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * as long as the resulting graph of srcu_structs is acyclic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * There are memory-ordering constraints implied by synchronize_srcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * On systems with more than one CPU, when synchronize_srcu() returns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * each CPU is guaranteed to have executed a full memory barrier since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * the end of its last corresponding SRCU read-side critical section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * whose beginning preceded the call to synchronize_srcu(). In addition,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * each CPU having an SRCU read-side critical section that extends beyond
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * the return from synchronize_srcu() is guaranteed to have executed a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * full memory barrier after the beginning of synchronize_srcu() and before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * the beginning of that SRCU read-side critical section. Note that these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * guarantees include CPUs that are offline, idle, or executing in user mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * as well as CPUs that are executing in the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * Furthermore, if CPU A invoked synchronize_srcu(), which returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * to its caller on CPU B, then both CPU A and CPU B are guaranteed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * to have executed a full memory barrier during the execution of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * are the same CPU, but again only if the system has more than one CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Of course, these memory-ordering guarantees apply only when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * passed the same srcu_struct structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * If SRCU is likely idle, expedite the first request. This semantic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * was provided by Classic SRCU, and is relied upon by its users, so TREE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * SRCU must also provide it. Note that detecting idleness is heuristic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * and subject to both false positives and negatives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) void synchronize_srcu(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) synchronize_srcu_expedited(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) __synchronize_srcu(ssp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) EXPORT_SYMBOL_GPL(synchronize_srcu);
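
/*
 * Illustrative usage sketch (not part of this file): the usual SRCU
 * reader/updater pairing around synchronize_srcu().  The names my_srcu,
 * my_lock, gp, and do_something_with() are hypothetical.
 *
 *	// Reader: may block within the SRCU read-side critical section.
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 *
 *	// Updater: unpublish, wait for pre-existing readers, then free.
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */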
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * @ssp: srcu_struct to provide cookie for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * This function returns a cookie that can be passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * poll_state_synchronize_srcu(), which will return true if a full grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * period has elapsed in the meantime.  It is the caller's responsibility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * to make sure that such a grace period actually happens, for example, by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * invoking call_srcu() after return from get_state_synchronize_srcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) // Any prior manipulation of SRCU-protected data must happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) // before the load from ->srcu_gp_seq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return rcu_seq_snap(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
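
/*
 * Illustrative usage sketch (not part of this file): taking a cookie with
 * get_state_synchronize_srcu() and then using call_srcu() to make sure the
 * corresponding grace period eventually starts, as suggested above.  The
 * names my_srcu, my_rcu_head, and my_cb() are hypothetical.
 *
 *	cookie = get_state_synchronize_srcu(&my_srcu);
 *	call_srcu(&my_srcu, &my_rcu_head, my_cb);	// Kicks off a GP.
 *
 *	// Some time later:
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		pr_info("Grace period for this cookie has completed.\n");
 */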
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * start_poll_synchronize_srcu - Provide cookie and start grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * @ssp: srcu_struct to provide cookie for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * This function returns a cookie that can be passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * poll_state_synchronize_srcu(), which will return true if a full grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * this function also ensures that any needed SRCU grace period will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * started. This convenience does come at a cost in terms of CPU overhead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return srcu_gp_start_if_needed(ssp, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * poll_state_synchronize_srcu - Has cookie's grace period ended?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * @ssp: srcu_struct to provide cookie for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * This function takes the cookie that was returned from either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * returns %true if an SRCU grace period elapsed since the time that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * cookie was created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) // Ensure that the end of the SRCU grace period happens before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) // any subsequent code that the caller might execute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) smp_mb(); // ^^^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
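
/*
 * Illustrative usage sketch (not part of this file): polling-based
 * deferred reclamation built on start_poll_synchronize_srcu() and
 * poll_state_synchronize_srcu(), which avoids blocking in
 * synchronize_srcu() and avoids a callback per object.  The names
 * my_srcu, retired_list, and the obj fields are hypothetical, and the
 * list is assumed to be appended to in cookie order.
 *
 *	// Retire an object: record a cookie and kick off a grace period.
 *	obj->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
 *	list_add_tail(&obj->node, &retired_list);
 *
 *	// Later, with the list appropriately locked, reclaim the objects
 *	// whose grace periods have completed (oldest cookies first).
 *	list_for_each_entry_safe(obj, tmp, &retired_list, node) {
 *		if (!poll_state_synchronize_srcu(&my_srcu, obj->gp_cookie))
 *			break;
 *		list_del(&obj->node);
 *		kfree(obj);
 *	}
 */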
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * Callback function for srcu_barrier() use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static void srcu_barrier_cb(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct srcu_struct *ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ssp = sdp->ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) complete(&ssp->srcu_barrier_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * @ssp: srcu_struct on which to wait for in-flight callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) void srcu_barrier(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) check_init_srcu_struct(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mutex_lock(&ssp->srcu_barrier_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) smp_mb(); /* Force ordering following return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) mutex_unlock(&ssp->srcu_barrier_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return; /* Someone else did our work for us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) rcu_seq_start(&ssp->srcu_barrier_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) init_completion(&ssp->srcu_barrier_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* Initial count prevents reaching zero until all CBs are posted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * Each pass through this loop enqueues a callback, but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * on CPUs already having callbacks enqueued. Note that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * a CPU already has callbacks enqueued, it must have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * registered the need for a future grace period, so all we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * need do is enqueue a callback that will use the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * grace period as the last callback already in the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) sdp = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) spin_lock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) atomic_inc(&ssp->srcu_barrier_cpu_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) sdp->srcu_barrier_head.func = srcu_barrier_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) debug_rcu_head_queue(&sdp->srcu_barrier_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) &sdp->srcu_barrier_head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) atomic_dec(&ssp->srcu_barrier_cpu_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) spin_unlock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* Remove the initial count, at which point reaching zero can happen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) complete(&ssp->srcu_barrier_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) wait_for_completion(&ssp->srcu_barrier_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) rcu_seq_end(&ssp->srcu_barrier_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) mutex_unlock(&ssp->srcu_barrier_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) EXPORT_SYMBOL_GPL(srcu_barrier);
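
/*
 * Illustrative usage sketch (not part of this file): a typical teardown
 * sequence that waits for outstanding call_srcu() callbacks before
 * destroying the srcu_struct.  The name my_srcu is hypothetical, and new
 * call_srcu() invocations are assumed to have already been stopped.
 *
 *	srcu_barrier(&my_srcu);		// Wait for in-flight callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Now safe to clean up.
 */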
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * srcu_batches_completed - return batches completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * @ssp: srcu_struct on which to report batch completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * Report the number of batches, correlated with, but not necessarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * precisely the same as, the number of grace periods that have elapsed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) unsigned long srcu_batches_completed(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return READ_ONCE(ssp->srcu_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) EXPORT_SYMBOL_GPL(srcu_batches_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Core SRCU state machine. Push state bits of ->srcu_gp_seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * completed in that state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static void srcu_advance_state(struct srcu_struct *ssp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) mutex_lock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * Because readers might be delayed for an extended period after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * fetching ->srcu_idx for their index, at any point in time there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * might well be readers using both idx=0 and idx=1. We therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) * need to wait for readers to clear from both index values before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * invoking a callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * The load-acquire ensures that we see the accesses performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * by the prior grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (idx == SRCU_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) spin_lock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) mutex_unlock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (idx == SRCU_STATE_IDLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) srcu_gp_start(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (idx != SRCU_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) mutex_unlock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return; /* Someone else started the grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) idx = 1 ^ (ssp->srcu_idx & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (!try_check_zero(ssp, idx, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) mutex_unlock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return; /* readers present, retry later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) srcu_flip(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) spin_lock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * SRCU read-side critical sections are normally short,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * so check at least twice in quick succession after a flip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) idx = 1 ^ (ssp->srcu_idx & 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (!try_check_zero(ssp, idx, 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) mutex_unlock(&ssp->srcu_gp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return; /* readers present, retry later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Invoke a limited number of SRCU callbacks that have passed through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * their grace period. If there are more to do, SRCU will reschedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * the workqueue. Note that needed memory barriers have been executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * in this task's context by srcu_readers_active_idx_check().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void srcu_invoke_callbacks(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) bool more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct rcu_cblist ready_cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct rcu_head *rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct srcu_struct *ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) sdp = container_of(work, struct srcu_data, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ssp = sdp->ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rcu_cblist_init(&ready_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) spin_lock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) rcu_segcblist_advance(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) rcu_seq_current(&ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (sdp->srcu_cblist_invoking ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) spin_unlock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return; /* Someone else on the job or nothing to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* We are on the job! Extract and invoke ready callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sdp->srcu_cblist_invoking = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_unlock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) rhp = rcu_cblist_dequeue(&ready_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) debug_rcu_head_unqueue(rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) rhp->func(rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * Update counts, accelerate new callbacks, and if needed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) * schedule another round of callback invocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) spin_lock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) rcu_seq_snap(&ssp->srcu_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) sdp->srcu_cblist_invoking = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_unlock_irq_rcu_node(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (more)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) srcu_schedule_cbs_sdp(sdp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * Finished one round of SRCU grace period. Start another if there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * more SRCU callbacks queued, otherwise put SRCU into not-running state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) bool pushgp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) spin_lock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* All requests fulfilled, time to go idle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) pushgp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Outstanding request and no GP. Start one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) srcu_gp_start(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) spin_unlock_irq_rcu_node(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (pushgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * This is the work-queue function that handles SRCU grace periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) static void process_srcu(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct srcu_struct *ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) ssp = container_of(work, struct srcu_struct, work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) srcu_advance_state(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) srcu_reschedule(ssp, srcu_get_delay(ssp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) void srcutorture_get_gp_data(enum rcutorture_type test_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct srcu_struct *ssp, int *flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) unsigned long *gp_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (test_type != SRCU_FLAVOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) unsigned long s0 = 0, s1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) idx = ssp->srcu_idx & 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) unsigned long l0, l1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) unsigned long u0, u1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) long c0, c1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct srcu_data *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) sdp = per_cpu_ptr(ssp->sda, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) u0 = data_race(sdp->srcu_unlock_count[!idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) u1 = data_race(sdp->srcu_unlock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * Make sure that a lock is always counted if the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * unlock is counted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) l0 = data_race(sdp->srcu_lock_count[!idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) l1 = data_race(sdp->srcu_lock_count[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) c0 = l0 - u0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) c1 = l1 - u1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) pr_cont(" %d(%ld,%ld %c)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) cpu, c0, c1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) s0 += c0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) s1 += c1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) pr_cont(" T(%ld,%ld)\n", s0, s1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static int __init srcu_bootup_announce(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) pr_info("Hierarchical SRCU implementation.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) early_initcall(srcu_bootup_announce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) void __init srcu_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) struct srcu_struct *ssp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) srcu_init_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) while (!list_empty(&srcu_boot_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) work.work.entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) check_init_srcu_struct(ssp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) list_del_init(&ssp->work.work.entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) queue_work(rcu_gp_wq, &ssp->work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Initialize any global-scope srcu_struct structures used by this module. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static int srcu_module_coming(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct srcu_struct **sspp = mod->srcu_struct_ptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) for (i = 0; i < mod->num_srcu_structs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ret = init_srcu_struct(*(sspp++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (WARN_ON_ONCE(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* Clean up any global-scope srcu_struct structures used by this module. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void srcu_module_going(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct srcu_struct **sspp = mod->srcu_struct_ptrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) for (i = 0; i < mod->num_srcu_structs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) cleanup_srcu_struct(*(sspp++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* Handle one module, either coming or going. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static int srcu_module_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct module *mod = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) case MODULE_STATE_COMING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ret = srcu_module_coming(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) case MODULE_STATE_GOING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) srcu_module_going(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static struct notifier_block srcu_module_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) .notifier_call = srcu_module_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) .priority = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static __init int init_srcu_module_notifier(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ret = register_module_notifier(&srcu_module_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pr_warn("Failed to register srcu module notifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) late_initcall(init_srcu_module_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) #endif /* #ifdef CONFIG_MODULES */