/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
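
/*
 * Typical usage of the two helpers above (see sync_exp_work_done() and
 * exp_funnel_lock() below for the real thing):
 *
 *	s = rcu_exp_gp_seq_snap();
 *	... arrange for or wait on an expedited grace period ...
 *	if (rcu_exp_gp_seq_done(s))
 *		... a full expedited grace period has elapsed ...
 */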

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity. Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online. This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue; /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
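		/*
		 * Walk up toward the root, setting each ancestor's
		 * ->expmaskinit bit.  Once an ancestor that already had a
		 * nonzero mask has been updated, stop: levels above it were
		 * initialized by an earlier pass.
		 */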
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period. This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree. (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
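		/*
		 * Everything covered by this rcu_node is done, so clear its
		 * bit in its parent and repeat one level up the tree.
		 */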
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node. This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->exp_deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods. Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held. Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex. The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
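	/*
	 * Reached the root without finding an expedited grace period to
	 * piggyback on, so acquire ->exp_mutex and, after one final check,
	 * start one ourselves.
	 */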
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
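	/* Any CPU not found above to be idle, offline, or ourself must be IPIed. */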
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well. Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
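		/*
		 * The IPI target is this very CPU, which is executing this
		 * code and so is not within an RCU read-side critical
		 * section: record its quiescent state instead of self-IPIing.
		 */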
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
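		/*
		 * Prefer to run the work on a CPU within this leaf node's
		 * range (->ffmask tracks its fully-functional CPUs), which
		 * keeps the scan close to the CPUs being scanned.
		 */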
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0); /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
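		/*
		 * The quick wait above did not succeed, so force the
		 * scheduler-clock tick on for any nohz_full CPUs still
		 * holding up this expedited grace period, helping them
		 * notice and report their quiescent states.
		 */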
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
		WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		panic_on_rcu_stall();
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
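		/* Push the next stall check well out to avoid flooding the log. */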
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period. Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
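		/*
		 * Wake up any tasks waiting in exp_funnel_lock() on this
		 * sequence number; the low-order bits of the grace-period
		 * counter select one of the four per-node wait queues.
		 */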
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single(). If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree. Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section. If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->exp_deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section. In this case we can count on a future
	 * rcu_read_unlock(). However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch. Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->exp_deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
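	/*
	 * ->exp_tasks points into ->blkd_tasks at the first task blocking
	 * the current expedited grace period; walk from there to the end
	 * of the list, printing each blocker's PID.
	 */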
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
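	/* An interrupt taken from the idle loop is already a quiescent state. */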
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) if (rcu_is_cpu_rrupt_from_idle()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) rcu_exp_need_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static void sync_sched_exp_online_cleanup(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) int my_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) my_cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* Quiescent state either not needed or already requested, leave. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rdp->cpu_no_qs.b.exp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* Quiescent state needed on current CPU, so set it up locally. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (my_cpu == cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rcu_exp_need_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* Quiescent state needed on some other CPU, send IPI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) WARN_ON_ONCE(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Because preemptible RCU does not exist, we never have to check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * tasks blocked within RCU read-side critical sections that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * blocking the current expedited grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static int rcu_print_task_exp_stall(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * synchronize_rcu_expedited - Brute-force RCU grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * Wait for an RCU grace period, but expedite it. The basic idea is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * the CPU is in an RCU read-side critical section, and if so, it sets a flag that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * causes the outermost rcu_read_unlock() to report the quiescent state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * other hand, if the CPU is not in an RCU read-side critical section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * the IPI handler reports the quiescent state immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Although this is a great improvement over previous expedited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * implementations, it is still unfriendly to real-time workloads, and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * thus not recommended for any sort of common-case code. In fact, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * you are using synchronize_rcu_expedited() in a loop, please restructure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * your code to batch your updates, and then use a single synchronize_rcu()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * instead; see the illustrative batching sketch following this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * This has the same semantics as (but is more brutal than) synchronize_rcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) void synchronize_rcu_expedited(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) bool no_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct rcu_exp_work rew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) unsigned long s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) lock_is_held(&rcu_lock_map) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) lock_is_held(&rcu_sched_lock_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* Is the state such that the call is a grace period? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (rcu_blocking_is_gp())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* If expedited grace periods are prohibited, fall back to normal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (rcu_gp_is_normal()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) wait_rcu_gp(call_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Take a snapshot of the sequence number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) s = rcu_exp_gp_seq_snap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (exp_funnel_lock(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return; /* Someone else did our work for us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /* Don't use workqueue during boot or from an incoming CPU. */
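/* Disable preemption so smp_processor_id() and the cpu_active_mask check refer to the same CPU. */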
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) !cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Ensure that the load happens before any action based on it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (unlikely(no_wq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) rcu_exp_sel_wait_wake(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Marshal arguments & schedule the expedited grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rew.rew_s = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) queue_work(rcu_gp_wq, &rew.rew_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Wait for expedited grace period to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) rnp = rcu_get_root();
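/*
 * The low-order bits of the sequence-number snapshot select one of the
 * root rcu_node structure's four expedited wait queues, spreading
 * waiters for different grace periods across distinct queues.
 */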
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) sync_exp_work_done(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) smp_mb(); /* Workqueue actions happen before return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Let the next expedited grace period start. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) mutex_unlock(&rcu_state.exp_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (likely(!no_wq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) destroy_work_on_stack(&rew.rew_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
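/*
 * Illustrative sketch, not part of the original file: the kernel-doc above
 * asks callers to batch their updates rather than expedite each one.  The
 * slot[] array, slot_lock, new[], old[], and free_old() below are
 * hypothetical stand-ins for a caller's RCU-protected pointers, update-side
 * lock, replacement values, and reclamation routine.
 *
 * Discouraged: one expedited grace period per update.
 *
 *	for (i = 0; i < n; i++) {
 *		spin_lock(&slot_lock);
 *		old = rcu_dereference_protected(slot[i],
 *						lockdep_is_held(&slot_lock));
 *		rcu_assign_pointer(slot[i], new[i]);
 *		spin_unlock(&slot_lock);
 *		synchronize_rcu_expedited();
 *		free_old(old);
 *	}
 *
 * Preferred: publish all of the updates, then wait for a single grace period.
 *
 *	spin_lock(&slot_lock);
 *	for (i = 0; i < n; i++) {
 *		old[i] = rcu_dereference_protected(slot[i],
 *						   lockdep_is_held(&slot_lock));
 *		rcu_assign_pointer(slot[i], new[i]);
 *	}
 *	spin_unlock(&slot_lock);
 *	synchronize_rcu();
 *	for (i = 0; i < n; i++)
 *		free_old(old[i]);
 *
 * The reader-visible semantics are the same either way; batching simply
 * amortizes the grace-period latency across all of the updates.
 */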