/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/rcu_node_tree.h>

#include "rcu_segcblist.h"

/* Communicate arguments to a workqueue handler. */
struct rcu_exp_work {
	unsigned long rew_s;
	struct work_struct rew_work;
};

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/* some rcu_state fields as well as */
					/* following. */
	unsigned long gp_seq;		/* Track rsp->gp_seq. */
	unsigned long gp_seq_needed;	/* Track furthest future GP request. */
	unsigned long completedqs;	/* All QSes done for this node. */
	unsigned long qsmask;		/* CPUs or groups that need to switch in */
					/* order for current grace period to proceed.*/
					/* In leaf rcu_node, each bit corresponds to */
					/* an rcu_data structure, otherwise, each */
					/* bit corresponds to a child rcu_node */
					/* structure. */
	unsigned long rcu_gp_init_mask;	/* Mask of offline CPUs at GP init. */
	unsigned long qsmaskinit;
					/* Per-GP initial value for qsmask. */
					/* Initialized from ->qsmaskinitnext at the */
					/* beginning of each grace period. */
	unsigned long qsmaskinitnext;
					/* Online CPUs for next grace period. */
	unsigned long expmask;		/* CPUs or groups that need to check in */
					/* to allow the current expedited GP */
					/* to complete. */
	unsigned long expmaskinit;
					/* Per-GP initial values for expmask. */
					/* Initialized from ->expmaskinitnext at the */
					/* beginning of each expedited GP. */
	unsigned long expmaskinitnext;
					/* Online CPUs for next expedited GP. */
					/* Any CPU that has ever been online will */
					/* have its bit set. */
	unsigned long cbovldmask;
					/* CPUs experiencing callback overload. */
	unsigned long ffmask;		/* Fully functional CPUs. */
	unsigned long grpmask;		/* Mask to apply to parent qsmask. */
					/* Only one bit will be set in this mask. */
	int grplo;			/* lowest-numbered CPU here. */
	int grphi;			/* highest-numbered CPU here. */
	u8 grpnum;			/* group number for next level up. */
	u8 level;			/* root is at level 0. */
	bool wait_blkd_tasks;		/* Necessary to wait for blocked tasks to */
					/* exit RCU read-side critical sections */
					/* before propagating offline up the */
					/* rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
					/* Tasks blocked in RCU read-side critical */
					/* section.  Tasks are placed at the head */
					/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/* current grace period, or NULL if there */
					/* is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to the first task blocking the */
					/* current expedited grace period, or NULL */
					/* if there is no such task.  If there */
					/* is no current expedited grace period, */
					/* then there cannot be any such task. */
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/* priority boosted, or NULL if no priority */
					/* boosting is needed for this rcu_node */
					/* structure.  If there are no tasks */
					/* queued on this rcu_node structure that */
					/* are blocking the current grace period, */
					/* there can be no such task. */
	struct rt_mutex boost_mtx;
					/* Used only for the priority-boosting */
					/* side effect, not as a lock. */
	unsigned long boost_time;
					/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
					/* kthread that takes care of priority */
					/* boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
					/* State of boost_kthread_task for tracing. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
					/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
	struct rcu_exp_work rew;
	bool exp_need_flush;		/* Need to flush workitem? */
} ____cacheline_internodealigned_in_smp;

/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
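
/*
 * Illustrative example (not taken from the surrounding code): on a leaf
 * rcu_node with ->grplo == 16, CPU 18 maps to bit 2 of that node's masks,
 * so leaf_node_cpu_bit(rnp, 18) == BIT(2) == 0x4.
 */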

/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};
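
/*
 * Illustrative sketch (not a quotation from the RCU core): a single load
 * of ->cpu_no_qs.s answers "does either grace-period flavor still need a
 * quiescent state from this CPU?", for example:
 *
 *	if (!READ_ONCE(rdp->cpu_no_qs.s))
 *		return;		// Neither normal nor expedited QS needed.
 *
 * while ->cpu_no_qs.b.norm and ->cpu_no_qs.b.exp can still be read and
 * cleared individually.
 */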

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long gp_seq;		/* Track rsp->gp_seq counter. */
	unsigned long gp_seq_needed;	/* Track furthest future GP request. */
	union rcu_noqs cpu_no_qs;	/* No QSes yet for this CPU. */
	bool core_needs_qs;		/* Core waits for quiescent state. */
	bool beenonline;		/* CPU online at least once. */
	bool gpwrap;			/* Possible ->gp_seq wrap. */
	bool exp_deferred_qs;		/* This CPU awaiting a deferred QS? */
	bool cpu_started;		/* RCU watching this onlining CPU. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long ticks_this_gp;	/* The number of scheduling-clock */
					/* ticks this CPU has handled */
					/* during and after the last grace */
					/* period it is aware of. */
	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
	struct work_struct strict_work;	/* Schedule readers for strict GPs. */

	/* 2) batch handling */
	struct rcu_segcblist cblist;	/* Segmented callback list, with */
					/* different callbacks waiting for */
					/* different grace periods. */
	long qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long n_cbs_invoked;	/* # callbacks invoked since boot. */
	unsigned long n_force_qs_snap;
					/* did other CPU force QS recently? */
	long blimit;			/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	long dynticks_nesting;		/* Track process nesting level. */
	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old, need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
	bool rcu_forced_tick_exp;	/* ... provide QS to expedited GP. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;	/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 5) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_gp_kthread;
	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
	atomic_t nocb_lock_contended;	/* Contention experienced. */
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
	struct timer_list nocb_timer;	/* Enforce finite deferral. */
	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */

	/* The following fields are used by call_rcu, hence own cacheline. */
	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */

	/* The following fields are used by GP kthread, hence own cacheline. */
	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
	struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
	unsigned long nocb_gp_seq;	/* If so, ->gp_seq to wait for. */
	unsigned long nocb_gp_loops;	/* # passes through wait code. */
	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
	struct task_struct *nocb_cb_kthread;
	struct rcu_data *nocb_next_cb_rdp;
					/* Next rcu_data in wakeup chain. */

	/* The following fields are used by CB kthread, hence new cacheline. */
	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
					/* GP rdp takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 6) RCU priority boosting. */
	struct task_struct *rcu_cpu_kthread_task;
					/* rcuc per-CPU kthread or NULL. */
	unsigned int rcu_cpu_kthread_status;
	char rcu_cpu_has_work;

	/* 7) Diagnostic data, including RCU CPU stall warnings. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
	/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
	struct irq_work rcu_iw;		/* Check for non-irq activity. */
	bool rcu_iw_pending;		/* Is ->rcu_iw pending? */
	unsigned long rcu_iw_gp_seq;	/* ->gp_seq associated with ->rcu_iw. */
	unsigned long rcu_ofl_gp_seq;	/* ->gp_seq at last offline. */
	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */

	int cpu;
};

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOCB_WAKE_NOT	0
#define RCU_NOCB_WAKE		1
#define RCU_NOCB_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/* jiffies_till_next_fqs. */
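/*
 * Worked example (assuming the usual HZ choices): HZ=100 or HZ=250 gives
 * 1 jiffy, HZ=300 gives 2, and HZ=1000 gives 3, keeping the default FQS
 * interval roughly constant in wall-clock time.
 */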

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/* delay between bouts of */
					/* quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/* at least one scheduling clock */
					/* irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
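
/*
 * Illustrative use of rcu_wait() (hypothetical caller, not from this file):
 * a kthread can sleep until another context sets a flag, e.g.
 *
 *	rcu_wait(READ_ONCE(my_kthread_should_run));
 *
 * The condition is re-evaluated after every wakeup, so spurious wakeups
 * simply loop back into schedule().
 */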

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
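/*
 * Worked example (assuming RCU_FANOUT_LEAF == 16; the exact shape depends
 * on the configuration): a 64-CPU system gets a two-level tree of five
 * rcu_node structures, with ->node[0] as the root (->level[0]) and
 * ->node[1] through ->node[4] as the leaves (->level[1]), each covering
 * 16 CPUs.
 */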
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/* shut bogus gcc warning) */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8 boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gp_seq;			/* Grace-period sequence #. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/* jiffies. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */
	unsigned long gp_wake_time;		/* Last GP kthread wake. */
	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */

	/* End of fields guarded by root rcu_node's lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/* rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */
	u8 cbovld;				/* Callback overload now? */
	u8 cbovldnext;				/* ^        ^  next time? */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/* force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/* kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/* force_quiescent_state(). */
	unsigned long gp_start;			/* Time at which GP started, */
						/* but in jiffies. */
	unsigned long gp_end;			/* Time last GP ended, again */
						/* in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/* activity in jiffies. */
	unsigned long gp_req_activity;		/* Time of last GP request */
						/* in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/* for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/* a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/* GP start. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */

	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/* GP pre-initialization. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS	 1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS	 2	/* Wait done for grace-period start. */
#define RCU_GP_ONOFF	 3	/* Grace-period initialization hotplug. */
#define RCU_GP_INIT	 4	/* Grace-period initialization. */
#define RCU_GP_WAIT_FQS	 5	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 6	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 7	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 8	/* Grace-period cleanup complete. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, which allows the userspace
 * tracing tools to map the string address back to the matching string.
 */
#ifdef CONFIG_PREEMPT_RCU
#define RCU_ABBR 'p'
#define RCU_NAME_RAW "rcu_preempt"
#else /* #ifdef CONFIG_PREEMPT_RCU */
#define RCU_ABBR 's'
#define RCU_NAME_RAW "rcu_sched"
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
#ifndef CONFIG_TRACING
#define RCU_NAME RCU_NAME_RAW
#else /* #ifdef CONFIG_TRACING */
static char rcu_name[] = RCU_NAME_RAW;
static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */

/* Forward declarations for tree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_qs(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_flavor_sched_clock_irq(int user);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
static void rcu_preempt_deferred_qs(struct task_struct *t);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j);
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags);
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_cpu_nocb_kthread(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
static void show_rcu_nocb_state(struct rcu_data *rdp);
static void rcu_nocb_lock(struct rcu_data *rdp);
static void rcu_nocb_unlock(struct rcu_data *rdp);
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags);
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(void);
#define rcu_nocb_lock_irqsave(rdp, flags)				\
do {									\
	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
		local_irq_save(flags);					\
	else								\
		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
} while (0)
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
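
/*
 * Illustrative pairing (a sketch, not quoted from the callers): code that
 * updates ->cblist typically brackets the update as
 *
 *	rcu_nocb_lock_irqsave(rdp, flags);
 *	...
 *	rcu_nocb_unlock_irqrestore(rdp, flags);
 *
 * so the same sequence is safe whether or not this CPU's callbacks are
 * offloaded to nocb kthreads.
 */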

static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);
static void rcu_dynticks_task_trace_enter(void);
static void rcu_dynticks_task_trace_exit(void);

/* Forward declarations for tree_stall.h */
static void record_gp_stall_check_time(void);
static void rcu_iw_handler(struct irq_work *iwp);
static void check_cpu_stall(struct rcu_data *rdp);
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay);