Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

Source file: kernel/rcu/tree_plugin.h (git blame: all lines from commit 8f3ce5b39 by kx, 2023-10-28 12:00:06 +0300)
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
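
/*
 * Example (illustrative, not part of the upstream file): rcu_nocb_mask
 * and rcu_nocb_poll above are normally populated from the "rcu_nocbs="
 * and "rcu_nocb_poll" kernel boot parameters, e.g.:
 *
 *	rcu_nocbs=4-7 rcu_nocb_poll
 *
 * which offloads RCU callbacks for CPUs 4-7 and asks the offload
 * kthreads to poll rather than wait to be awakened.
 */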

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour (or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

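/*
 * Example (illustrative, not part of the upstream file): most of the
 * knobs reported above are rcutree.* module parameters and can be set
 * on the kernel boot command line, for instance:
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.kthread_prio=2
 *
 * Anything left at its default value is simply not mentioned in the
 * bootup-oddness report.
 */
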
#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1

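/*
 * Example (illustrative): blkd_state below is the sum of the flags
 * above.  A value of RCU_GP_TASKS + RCU_EXP_BLKD (0x9) therefore means
 * "a normal GP is already waiting on earlier queued tasks and the
 * newly preempted task blocks the expedited GP", which the decision
 * table handles by queuing the task at the tail of ->blkd_tasks.
 */
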
/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.  The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves interrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case                RCU_EXP_TASKS:
	case                RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                                              RCU_EXP_BLKD:
	case                                RCU_GP_BLKD:
	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS +                 RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->exp_deferred_qs);
}

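/*
 * Example (illustrative) walk-through: a task is preempted while this
 * CPU still owes the current normal GP a quiescent state
 * (rnp->qsmask & rdp->grpmask != 0) and no other task is queued.  Then
 * blkd_state == RCU_GP_BLKD, the task is queued at the tail of
 * ->blkd_tasks, and because it is the first task to block the normal
 * GP, ->gp_tasks is set to point at its list entry.
 */
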
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU.  It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	current->rcu_read_lock_nesting++;
}

static int rcu_preempt_read_exit(void)
{
	return --current->rcu_read_lock_nesting;
}

static void rcu_preempt_depth_set(int val)
{
	current->rcu_read_lock_nesting = val;
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_read_exit() == 0) {
		barrier();  /* critical section before exit code. */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
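
/*
 * Example (illustrative): under CONFIG_PREEMPT_RCU, the two functions
 * above are what the rcu_read_lock()/rcu_read_unlock() wrappers expand
 * to, so a canonical reader such as
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);	// hypothetical consumer
 *	rcu_read_unlock();
 *
 * simply bumps ->rcu_read_lock_nesting on entry and, on the outermost
 * exit, checks ->rcu_read_unlock_special for any cleanup work.
 */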

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states.  The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->exp_deferred_qs) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU.  Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx);

	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section?  It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states.  The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled.  This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section.  The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
static void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) static void rcu_read_unlock_special(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	bool preempt_bh_were_disabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	bool irqs_were_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	/* NMI handlers cannot block and cannot safely manipulate state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	if (in_nmi())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	irqs_were_disabled = irqs_disabled_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (preempt_bh_were_disabled || irqs_were_disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		bool exp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		struct rcu_node *rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		exp = (t->rcu_blocked_node &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		       READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		      (rdp->grpmask & READ_ONCE(rnp->expmask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		// Need to defer quiescent state until everything is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			// Using softirq, safe to awaken, and either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 			// wakeup is free or there is an expedited GP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 			raise_softirq_irqoff(RCU_SOFTIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			// Enabling BH or preempt does reschedule, so...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			// Also if no expediting, slow is OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			// Plus nohz_full CPUs eventually get tick enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			set_tsk_need_resched(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			set_preempt_need_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 			    !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 				// Get scheduler to re-evaluate and call hooks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 				// If !IRQ_WORK, FQS scan will eventually IPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 				init_irq_work(&rdp->defer_qs_iw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 					      rcu_preempt_deferred_qs_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 				rdp->defer_qs_iw_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	rcu_preempt_deferred_qs_irqrestore(t, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  * Check that the list of blocked tasks for the newly completed grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  * period is in fact empty.  It is a serious bug to complete a grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  * period that still has RCU readers blocked!  This function must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  * invoked -before- updating this rnp's ->gp_seq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)  * Also, if there are blocked tasks on the list, they automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654)  * block the newly created grace period, so set up ->gp_tasks accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	raw_lockdep_assert_held_rcu_node(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		dump_blkd_tasks(rnp, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	if (rcu_preempt_has_tasks(rnp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		t = container_of(rnp->gp_tasks, struct task_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 				 rcu_node_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 						rnp->gp_seq, t->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	WARN_ON_ONCE(rnp->qsmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  * Check for a quiescent state from the current CPU, including voluntary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  * context switches for Tasks RCU.  When a task blocks, the task is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  * recorded in the corresponding CPU's rcu_node structure, which is checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * elsewhere, hence this function need only check for quiescent states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  * related to the current CPU, not to those related to tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) static void rcu_flavor_sched_clock_irq(int user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	struct task_struct *t = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	if (user || rcu_is_cpu_rrupt_from_idle()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		rcu_note_voluntary_context_switch(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	if (rcu_preempt_depth() > 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		/* No QS, force context switch if deferred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		if (rcu_preempt_need_deferred_qs(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			set_tsk_need_resched(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 			set_preempt_need_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	} else if (rcu_preempt_need_deferred_qs(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		rcu_qs(); /* Report immediate QS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (rcu_preempt_depth() > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	    __this_cpu_read(rcu_data.core_needs_qs) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	    !t->rcu_read_unlock_special.b.need_qs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	    time_after(jiffies, rcu_state.gp_start + HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		t->rcu_read_unlock_special.b.need_qs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
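
/*
 * Illustrative aside: the preempt_count() test above is a plain bitmask
 * check.  Assuming the mainline field layout (low byte counts
 * preempt_disable() nesting, next byte counts softirq nesting), a
 * standalone sketch of the same predicate:
 */
#include <stdio.h>

#define PREEMPT_MASK	0x000000ffU	/* preempt_disable() nesting */
#define SOFTIRQ_MASK	0x0000ff00U	/* softirq nesting */

static int no_qs_possible(unsigned int pcnt)
{
	/* Nonzero if preemption is disabled or a softirq is running. */
	return (pcnt & (PREEMPT_MASK | SOFTIRQ_MASK)) != 0;
}

int main(void)
{
	printf("%d\n", no_qs_possible(0x00000000));	/* 0: clean */
	printf("%d\n", no_qs_possible(0x00000002));	/* 1: preemption off */
	printf("%d\n", no_qs_possible(0x00000100));	/* 1: in softirq */
	return 0;
}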
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  * Check for a task exiting while in a preemptible-RCU read-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  * critical section, clean up if so.  No need to issue warnings, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717)  * debug_check_no_locks_held() already does this if lockdep is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * Besides, if this function does anything other than just immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * return, there was a bug of some sort.  Spewing warnings from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * function is as likely as not to simply obscure important prior warnings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) void exit_rcu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	struct task_struct *t = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		rcu_preempt_depth_set(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	} else if (unlikely(rcu_preempt_depth())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		rcu_preempt_depth_set(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	__rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	rcu_preempt_deferred_qs(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740)  * Dump the blocked-tasks state, but limit the list dump to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741)  * specified number of elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	struct list_head *lhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	bool onl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct rcu_node *rnp1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	raw_lockdep_assert_held_rcu_node(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		__func__, rnp->grplo, rnp->grphi, rnp->level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		READ_ONCE(rnp->exp_tasks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	pr_info("%s: ->blkd_tasks", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	list_for_each(lhp, &rnp->blkd_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		pr_cont(" %p", lhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (++i >= ncheck)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			cpu, ".o"[onl],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
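
/*
 * Illustrative aside: the ".o"[onl] expression above indexes a
 * two-character string constant with a 0/1 flag, a common kernel idiom
 * for single-character status output.  Standalone sketch:
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool onl = true;

	/* '.' when the flag is 0, 'o' when it is 1. */
	printf("offline marker: %c\n", ".o"[false]);
	printf("online marker:  %c\n", ".o"[onl]);
	return 0;
}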
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) #else /* #ifdef CONFIG_PREEMPT_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  * If strict grace periods are enabled, and if the calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  * report that quiescent state and, if requested, spin for a bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) void rcu_read_unlock_strict(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	   irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	rcu_report_qs_rdp(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	udelay(rcu_unlock_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * Tell them what RCU they are running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) static void __init rcu_bootup_announce(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	pr_info("Hierarchical RCU implementation.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	rcu_bootup_announce_oddness();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * how many quiescent states have passed, only whether there has been at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * least one since the start of the grace period, this simply sets a flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * The caller must have disabled preemption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static void rcu_qs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	trace_rcu_grace_period(TPS("rcu_sched"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  * Register an urgently needed quiescent state.  If there is an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * dyntick-idle quiescent state visible to other CPUs, which will in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * some cases serve for expedited as well as normal grace periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  * Either way, register a lightweight quiescent state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) void rcu_all_qs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	/* Load rcu_urgent_qs before other flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		rcu_momentary_dyntick_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	rcu_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) EXPORT_SYMBOL_GPL(rcu_all_qs);
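
/*
 * Illustrative aside: the smp_load_acquire() above orders the load of
 * ->rcu_urgent_qs before the flag loads that follow it.  The same
 * ordering expressed in portable C11 atomics, with hypothetical flags
 * standing in for the per-CPU fields:
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int urgent_qs;		/* stands in for rcu_urgent_qs */
static _Atomic int need_heavy_qs;	/* stands in for rcu_need_heavy_qs */

int main(void)
{
	atomic_store(&need_heavy_qs, 1);
	atomic_store(&urgent_qs, 1);

	/* Acquire: this load is ordered before all later loads. */
	if (atomic_load_explicit(&urgent_qs, memory_order_acquire) &&
	    atomic_load_explicit(&need_heavy_qs, memory_order_relaxed))
		printf("urgent and heavyweight QS both requested\n");
	return 0;
}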
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) void rcu_note_context_switch(bool preempt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	trace_rcu_utilization(TPS("Start context switch"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	rcu_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	/* Load rcu_urgent_qs before other flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		rcu_momentary_dyntick_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	rcu_tasks_qs(current, preempt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	trace_rcu_utilization(TPS("End context switch"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) EXPORT_SYMBOL_GPL(rcu_note_context_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  * Because preemptible RCU does not exist, there are never any preempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * RCU readers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889)  * Because there is no preemptible RCU, there can be no readers blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * Because there is no preemptible RCU, there can be no deferred quiescent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static void rcu_preempt_deferred_qs(struct task_struct *t) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * Because there is no preemptible RCU, there can be no readers blocked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * so there is no need to check for blocked tasks.  So check only for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * bogus qsmask values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	WARN_ON_ONCE(rnp->qsmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  * Check to see if this CPU is in a non-context-switch quiescent state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918)  * that is, user mode or the idle loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static void rcu_flavor_sched_clock_irq(int user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (user || rcu_is_cpu_rrupt_from_idle()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * Get here if this CPU took its interrupt from user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * mode or from the idle loop, and if this is not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * nested interrupt.  In this case, the CPU is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 * a quiescent state, so note it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		 * No memory barrier is required here because rcu_qs()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		 * references only CPU-local variables that other CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		 * neither access nor modify, at least not while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		 * corresponding CPU is online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		rcu_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * Because preemptible RCU does not exist, tasks cannot possibly exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * while in preemptible RCU read-side critical sections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) void exit_rcu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * If boosting, set rcuc kthreads to realtime priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void rcu_cpu_kthread_setup(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) #ifdef CONFIG_RCU_BOOST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct sched_param sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	sp.sched_priority = kthread_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) #endif /* #ifdef CONFIG_RCU_BOOST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) #ifdef CONFIG_RCU_BOOST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  * or ->boost_tasks, advancing the pointer to the next task in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * ->blkd_tasks list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * Note that irqs must be enabled: boosting the task can block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * Returns 1 if there are more tasks needing to be boosted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static int rcu_boost(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	struct list_head *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (READ_ONCE(rnp->exp_tasks) == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	    READ_ONCE(rnp->boost_tasks) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		return 0;  /* Nothing left to boost. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * Recheck under the lock: all tasks in need of boosting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 * might exit their RCU read-side critical sections on their own.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * Preferentially boost tasks blocking expedited grace periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * This cannot starve the normal grace periods because a second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * expedited grace period must boost all blocked tasks, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * those blocking the pre-existing normal grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (rnp->exp_tasks != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		tb = rnp->exp_tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		tb = rnp->boost_tasks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * We boost task t by manufacturing an rt_mutex that appears to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * be held by task t.  We leave a pointer to that rt_mutex where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * task t can find it, and task t will release the mutex when it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * exits its outermost RCU read-side critical section.  Then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * simply acquiring this artificial rt_mutex will boost task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * Note that task t must acquire rnp->lock to remove itself from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 * the ->blkd_tasks list, which it will do from exit() if from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 * nowhere else.  We therefore are guaranteed that task t will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 * stay around at least until we drop rnp->lock.  Note that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * rnp->lock also resolves races between our priority boosting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 * and task t's exiting its outermost RCU read-side critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	t = container_of(tb, struct task_struct, rcu_node_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	/* Lock only for side effect: boosts task t's priority. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	rt_mutex_lock(&rnp->boost_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	return READ_ONCE(rnp->exp_tasks) != NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	       READ_ONCE(rnp->boost_tasks) != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
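
/*
 * Illustrative aside: the proxy-locked rt_mutex above relies on priority
 * inheritance, so that blocking on the mutex boosts its (reader) owner.
 * Userspace exposes the same mechanism as PTHREAD_PRIO_INHERIT; a
 * minimal single-threaded sketch of the setup (build with -lpthread):
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t boost_mtx;

	pthread_mutexattr_init(&attr);
	/* Waiters lend their priority to the current mutex owner. */
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&boost_mtx, &attr);

	pthread_mutex_lock(&boost_mtx);		/* contention would boost the owner */
	pthread_mutex_unlock(&boost_mtx);	/* boost ends when the owner releases */

	pthread_mutex_destroy(&boost_mtx);
	pthread_mutexattr_destroy(&attr);
	printf("PI mutex initialized and cycled\n");
	return 0;
}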
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)  * Priority-boosting kthread, one per leaf rcu_node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static int rcu_boost_kthread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	struct rcu_node *rnp = (struct rcu_node *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	int spincnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	int more2boost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	trace_rcu_utilization(TPS("Start boost kthread@init"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			 READ_ONCE(rnp->exp_tasks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		more2boost = rcu_boost(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		if (more2boost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			spincnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			spincnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		if (spincnt > 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			schedule_timeout_idle(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			spincnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	/* NOTREACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	trace_rcu_utilization(TPS("End boost kthread@notreached"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
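
/*
 * Illustrative aside: the spincnt logic above bounds how many back-to-back
 * boosting passes the kthread makes before yielding the CPU.  The same
 * spin-then-yield pattern in a standalone sketch (more_work() is a
 * hypothetical stand-in for rcu_boost()):
 */
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static bool more_work(void)
{
	static int remaining = 25;	/* pretend 25 passes are needed */
	return remaining-- > 0;
}

int main(void)
{
	int spincnt = 0;

	while (more_work()) {
		if (++spincnt > 10) {	/* been at it too long ... */
			sched_yield();	/* ... let someone else run */
			spincnt = 0;
		}
	}
	printf("all passes complete\n");
	return 0;
}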
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)  * Check to see if it is time to start boosting RCU readers that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * blocking the current grace period, and, if so, tell the per-rcu_node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  * kthread to start boosting them.  If there is an expedited grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  * period in progress, it is always time to boost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * The caller must hold rnp->lock, which this function releases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * The ->boost_kthread_task is immortal, so we don't need to worry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * about it going away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	__releases(rnp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	raw_lockdep_assert_held_rcu_node(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (rnp->exp_tasks != NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	    (rnp->gp_tasks != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	     rnp->boost_tasks == NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	     rnp->qsmask == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		if (rnp->exp_tasks == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		rcu_wake_cond(rnp->boost_kthread_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			      READ_ONCE(rnp->boost_kthread_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * Is the current CPU running the RCU-callbacks kthread?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * Caller must have preemption disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static bool rcu_is_callbacks_kthread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * Do priority-boost accounting for the start of a new grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
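
/*
 * Illustrative aside: RCU_BOOST_DELAY_JIFFIES above converts the
 * millisecond CONFIG_RCU_BOOST_DELAY value into jiffies, rounding up so
 * that a nonzero delay can never truncate to zero.  Standalone
 * arithmetic (HZ and the delay are assumed example values):
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned int hz = 250;		/* assumed CONFIG_HZ */
	const unsigned int delay_ms = 500;	/* assumed CONFIG_RCU_BOOST_DELAY */

	/* 500 ms at HZ=250 -> 125 jiffies; even 1 ms still yields 1. */
	printf("%u jiffies\n", DIV_ROUND_UP(delay_ms * hz, 1000));
	printf("%u jiffies\n", DIV_ROUND_UP(1U * hz, 1000));
	return 0;
}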
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  * Create an RCU-boost kthread for the specified node if one does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  * already exist.  We only create this kthread for preemptible RCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * Spawn failure produces a one-time warning and is otherwise ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	int rnp_index = rnp - rcu_get_root();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	struct sched_param sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	rcu_state.boost = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	if (rnp->boost_kthread_task != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			   "rcub/%d", rnp_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (WARN_ON_ONCE(IS_ERR(t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	rnp->boost_kthread_task = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	sp.sched_priority = kthread_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  * served by the rcu_node in question.  The CPU hotplug lock is still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  * held, so the value of rnp->qsmaskinit will be stable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  * We don't include outgoingcpu in the affinity set; pass -1 if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * no outgoing CPU.  If there are no CPUs left in the affinity set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * this function allows the kthread to execute on any CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct task_struct *t = rnp->boost_kthread_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	unsigned long mask = rcu_rnp_online_cpus(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	cpumask_var_t cm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	for_each_leaf_node_possible_cpu(rnp, cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		    cpu != outgoingcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			cpumask_set_cpu(cpu, cm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (cpumask_weight(cm) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		cpumask_setall(cm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	set_cpus_allowed_ptr(t, cm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	free_cpumask_var(cm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
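
/*
 * Illustrative aside: the function above builds an affinity mask that
 * excludes the outgoing CPU and falls back to "any CPU" if the mask
 * would otherwise be empty.  A userspace sketch of the same fallback
 * using the Linux-specific glibc affinity API (outgoingcpu is a
 * hypothetical choice):
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t cm;
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	int outgoingcpu = 0;			/* pretend CPU 0 is going away */
	int cpu;

	CPU_ZERO(&cm);
	for (cpu = 0; cpu < ncpus; cpu++)
		if (cpu != outgoingcpu)
			CPU_SET(cpu, &cm);

	if (CPU_COUNT(&cm) == 0)		/* nothing left: allow all CPUs */
		for (cpu = 0; cpu < ncpus; cpu++)
			CPU_SET(cpu, &cm);

	if (sched_setaffinity(0, sizeof(cm), &cm) != 0)
		perror("sched_setaffinity");
	else
		printf("affinity set across %d CPUs\n", CPU_COUNT(&cm));
	return 0;
}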
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  * Spawn boost kthreads -- called as soon as the scheduler is running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void __init rcu_spawn_boost_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	rcu_for_each_leaf_node(rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		rcu_spawn_one_boost_kthread(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void rcu_prepare_kthreads(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	struct rcu_node *rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (rcu_scheduler_fully_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		rcu_spawn_one_boost_kthread(rnp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #else /* #ifdef CONFIG_RCU_BOOST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	__releases(rnp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static bool rcu_is_callbacks_kthread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static void __init rcu_spawn_boost_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) static void rcu_prepare_kthreads(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) #endif /* #else #ifdef CONFIG_RCU_BOOST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) #if !defined(CONFIG_RCU_FAST_NO_HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * Check to see if any future non-offloaded RCU-related work will need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * to be done by the current CPU, even if none need be done immediately,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * returning 1 if so.  This function is part of the RCU implementation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * it is -not- an exported member of the RCU API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)  * CPU has RCU callbacks queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int rcu_needs_cpu(u64 basemono, u64 *nextevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	*nextevt = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	       !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * after it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static void rcu_cleanup_after_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)  * is nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void rcu_prepare_for_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * This code is invoked when a CPU goes idle, at which point we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * to have the CPU do everything required for RCU so that it can enter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * the energy-efficient dyntick-idle mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * The following preprocessor symbol controls this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  *	is sized to be roughly one RCU grace period.  Those energy-efficiency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  *	benchmarkers who might otherwise be tempted to set this to a large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  *	system.  And if you are -that- concerned about energy efficiency,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  *	just power the system down and be done with it!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * The value below works well in practice.  If future workloads require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  * adjustment, they can be converted into kernel config parameters, though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  * making the state machine smarter might be a better option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) module_param(rcu_idle_gp_delay, int, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * Try to advance callbacks on the current CPU, but only if it has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  * a while since the last time we did so.  Afterwards, if there are any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  * callbacks ready for immediate invocation, return true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static bool __maybe_unused rcu_try_advance_all_cbs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	bool cbs_ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* Exit early if we advanced recently. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (jiffies == rdp->last_advance_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	rdp->last_advance_all = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	 * Don't bother checking unless a grace period has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	 * completed since we last checked and there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	 * callbacks not yet ready to invoke.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	if ((rcu_seq_completed_gp(rdp->gp_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				  rcu_seq_current(&rnp->gp_seq)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	     unlikely(READ_ONCE(rdp->gpwrap))) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	    rcu_segcblist_pend_cbs(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		note_gp_changes(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		cbs_ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return cbs_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
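
/*
 * Illustrative aside: the jiffies == rdp->last_advance_all test above
 * rate-limits the advance pass to once per jiffy.  The same once-per-tick
 * guard in a standalone sketch (seconds stand in for jiffies):
 */
#include <stdio.h>
#include <time.h>

static time_t last_run;

static int should_run(time_t now)
{
	if (now == last_run)	/* already ran this tick: skip */
		return 0;
	last_run = now;		/* record the tick and do the work */
	return 1;
}

int main(void)
{
	time_t now = time(NULL);

	printf("%d\n", should_run(now));	/* 1: first call this second */
	printf("%d\n", should_run(now));	/* 0: rate-limited */
	return 0;
}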
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  * caller what timeout to set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * The caller must have disabled interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int rcu_needs_cpu(u64 basemono, u64 *nextevt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	unsigned long dj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	/* If no non-offloaded callbacks, RCU doesn't need the CPU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (rcu_segcblist_empty(&rdp->cblist) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	    rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		*nextevt = KTIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* Attempt to advance callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (rcu_try_advance_all_cbs()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		/* Some ready to invoke, so initiate later invocation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		invoke_rcu_core();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	rdp->last_accelerate = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	/* Request timer and round. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	*nextevt = basemono + dj * TICK_NSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
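
/*
 * Illustrative aside: the round_up() above aligns the wakeup to a
 * multiple of rcu_idle_gp_delay jiffies, so idle CPUs wake in batches
 * rather than each on its own tick.  For a power-of-two step the
 * kernel's round_up() reduces to a mask operation; standalone sketch
 * with assumed example values:
 */
#include <stdio.h>

/* Valid only when step is a power of two, as rcu_idle_gp_delay (4) is. */
#define ROUND_UP_POW2(x, step)	(((x) + (step) - 1) & ~((unsigned long)(step) - 1))

int main(void)
{
	unsigned long jiffies = 1001;	/* pretend current time */
	unsigned long delay = 4;	/* rcu_idle_gp_delay */
	unsigned long dj = ROUND_UP_POW2(delay + jiffies, delay) - jiffies;

	/* 1005 rounds up to 1008, so the timer fires 7 jiffies out. */
	printf("dj = %lu\n", dj);
	return 0;
}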
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  * Prepare a CPU for idle from an RCU perspective.  The first major task is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * sense whether nohz mode has been enabled or disabled via sysfs.  The second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * major task is to accelerate (that is, assign grace-period numbers to) any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * recently arrived callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * The caller must have disabled interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static void rcu_prepare_for_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	bool needwake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	int tne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	/* Handle nohz enablement switches conservatively. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	tne = READ_ONCE(tick_nohz_active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (tne != rdp->tick_nohz_enabled_snap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		if (!rcu_segcblist_empty(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			invoke_rcu_core(); /* force nohz to see update. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		rdp->tick_nohz_enabled_snap = tne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if (!tne)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 * If we have not yet accelerated this jiffy, accelerate all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	 * callbacks on this CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (rdp->last_accelerate == jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	rdp->last_accelerate = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		needwake = rcu_accelerate_cbs(rnp, rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (needwake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			rcu_gp_kthread_wake();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * Clean up for exit from idle.  Attempt to advance callbacks based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * any grace periods that elapsed while the CPU was idle, and if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  * callbacks are now ready to invoke, initiate invocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static void rcu_cleanup_after_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	if (rcu_try_advance_all_cbs())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		invoke_rcu_core();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) #ifdef CONFIG_RCU_NOCB_CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  * Offload callback processing from the set of CPUs specified at boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * time by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  * created that pull the callbacks from the corresponding CPU, wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  * a grace period to elapse, and invoke the callbacks.  These kthreads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  * are organized into GP kthreads, which manage incoming callbacks, wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * grace periods, and awaken CB kthreads, and the CB kthreads, which only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  * do a wake_up() on their GP kthread when they insert a callback into any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * in which case each kthread actively polls its CPU.  (Which isn't so great
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  * This is intended to be used in conjunction with Frederic Weisbecker's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  * running CPU-bound user-mode computations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  * Offloading of callbacks can also be used as an energy-efficiency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)  * measure because CPUs with no RCU callbacks queued are more aggressive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)  * about entering dyntick-idle mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
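/*
 * Editor's sketch (not part of the blamed source): a rough picture of the
 * organization described above.  Group sizes are illustrative; the actual
 * grouping is established by the kthread-spawning code elsewhere in this
 * file.
 *
 *   CPU 0 -- call_rcu() --+
 *   CPU 1 -- call_rcu() --+--> GP kthread: gathers CBs, waits for the
 *   CPU 2 -- call_rcu() --+    grace period, then wakes the per-CPU CB
 *                              kthreads, which invoke the callbacks.
 */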
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)  * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * given, a warning is emitted and all CPUs are offloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static int __init rcu_nocb_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (!strcasecmp(str, "all"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		cpumask_setall(rcu_nocb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		if (cpulist_parse(str, rcu_nocb_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			cpumask_setall(rcu_nocb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) __setup("rcu_nocbs=", rcu_nocb_setup);
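
/*
 * Editor's note (hedged usage sketch, not part of the blamed source): on
 * an RK3588 board such as the OrangePi 5, offloading a subset of cores
 * might look like "rcu_nocbs=4-7" on the kernel command line, while
 * "rcu_nocbs=all" offloads every CPU; the 4-7 numbering is an assumption
 * for illustration only.  An invalid list falls back to offloading all
 * CPUs, as the parser above warns.
 */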
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static int __init parse_rcu_nocb_poll(char *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	rcu_nocb_poll = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  * After all, the main point of bypassing is to avoid lock contention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  * on ->nocb_lock, which only can happen at high call_rcu() rates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) module_param(nocb_nobypass_lim_per_jiffy, int, 0);
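
/*
 * Editor's note: a standalone userspace sketch (not kernel code) of the
 * arithmetic behind the default above: 16000 call_rcu() invocations per
 * second, expressed per jiffy.  The HZ values are assumed examples, not
 * a claim about this kernel's configuration.
 */
#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++)
		/* e.g. HZ=250 allows 64 direct ->cblist enqueues per jiffy. */
		printf("HZ=%4d -> %d direct enqueues per jiffy\n",
		       hz_values[i], 16 * 1000 / hz_values[i]);
	return 0;
}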
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * lock isn't immediately available, increment ->nocb_lock_contended to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * flag the contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	__acquires(&rdp->nocb_bypass_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	atomic_inc(&rdp->nocb_lock_contended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	smp_mb__after_atomic(); /* atomic_inc() before lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	raw_spin_lock(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	smp_mb__before_atomic(); /* atomic_dec() after lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	atomic_dec(&rdp->nocb_lock_contended);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)  * Spinwait until the specified rcu_data structure's ->nocb_lock is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  * not contended.  Please note that this is extremely special-purpose,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  * relying on the fact that at most two kthreads and one CPU contend for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  * this lock, and also that the two kthreads are guaranteed to have frequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * grace-period-duration time intervals between successive acquisitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * of the lock.  This allows us to use an extremely simple throttling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  * mechanism, and further to apply it only to the CPU doing floods of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * call_rcu() invocations.  Don't try this at home!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static void rcu_nocb_wait_contended(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
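
/*
 * Editor's note: a minimal standalone sketch of the trylock-then-flag
 * pattern used by rcu_nocb_bypass_lock() and rcu_nocb_wait_contended()
 * above, rendered with C11 atomics.  Names are hypothetical stand-ins;
 * this illustrates the idea, not the kernel's raw spinlock.
 */
#include <stdatomic.h>

atomic_flag demo_lock = ATOMIC_FLAG_INIT;
atomic_int demo_contended;

void demo_bypass_lock(void)
{
	/* Fast path: uncontended acquisition leaves the counter untouched. */
	if (!atomic_flag_test_and_set_explicit(&demo_lock, memory_order_acquire))
		return;
	/* Slow path: advertise contention, then spin for the lock. */
	atomic_fetch_add(&demo_contended, 1);
	while (atomic_flag_test_and_set_explicit(&demo_lock, memory_order_acquire))
		;	/* A cpu_relax() equivalent would go here. */
	atomic_fetch_sub(&demo_contended, 1);
}

void demo_bypass_unlock(void)
{
	atomic_flag_clear_explicit(&demo_lock, memory_order_release);
}

void demo_wait_contended(void)
{
	/* The flooding CPU throttles itself until contention drains. */
	while (atomic_load(&demo_contended))
		;
}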
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  * Conditionally acquire the specified rcu_data structure's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  * ->nocb_bypass_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	return raw_spin_trylock(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * Release the specified rcu_data structure's ->nocb_bypass_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	__releases(&rdp->nocb_bypass_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	raw_spin_unlock(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * Acquire the specified rcu_data structure's ->nocb_lock, but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * if it corresponds to a no-CBs CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static void rcu_nocb_lock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	raw_spin_lock(&rdp->nocb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * Release the specified rcu_data structure's ->nocb_lock, but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * if it corresponds to a no-CBs CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static void rcu_nocb_unlock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		raw_spin_unlock(&rdp->nocb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * Release the specified rcu_data structure's ->nocb_lock and restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  * interrupts, but only if it corresponds to a no-CBs CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				       unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* Lockdep check that ->cblist may be safely accessed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		lockdep_assert_held(&rdp->nocb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  * grace period.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	swake_up_all(sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
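
/*
 * Editor's note: a standalone sketch of the indexing above.  It assumes
 * the rcu.h convention that the low two bits of ->gp_seq carry state, so
 * rcu_seq_ctr(s) == s >> 2 and successive grace periods alternate between
 * nocb_gp_wq[0] and nocb_gp_wq[1].
 */
#include <stdio.h>

#define DEMO_RCU_SEQ_CTR_SHIFT 2	/* Mirrors RCU_SEQ_CTR_SHIFT. */

static unsigned long demo_seq_ctr(unsigned long s)
{
	return s >> DEMO_RCU_SEQ_CTR_SHIFT;
}

int main(void)
{
	/* Each completed grace period advances ->gp_seq by one ctr step (4). */
	for (unsigned long gp_seq = 0; gp_seq < 16; gp_seq += 4)
		printf("gp_seq=%2lu -> nocb_gp_wq[%lu]\n",
		       gp_seq, demo_seq_ctr(gp_seq) & 0x1);
	return 0;
}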
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static void rcu_init_one_nocb(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* Is the specified CPU a no-CBs CPU? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) bool rcu_is_nocb_cpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	if (cpumask_available(rcu_nocb_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  * and this function releases it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static void wake_nocb_gp(struct rcu_data *rdp, bool force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			   unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	__releases(rdp->nocb_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	bool needwake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	lockdep_assert_held(&rdp->nocb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 				    TPS("AlreadyAwake"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		del_timer(&rdp->nocb_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		needwake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	if (needwake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		wake_up_process(rdp_gp->nocb_gp_kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)  * Arrange to wake the GP kthread for this NOCB group at some future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)  * time when it is safe to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			       const char *reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		mod_timer(&rdp->nocb_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	if (rdp->nocb_defer_wakeup < waketype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
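
/*
 * Editor's note: the deferral level above only escalates until the wakeup
 * actually fires.  A hedged standalone sketch of that monotonic-maximum
 * pattern, with hypothetical names standing in for the kernel's
 * RCU_NOCB_WAKE_NOT/RCU_NOCB_WAKE/RCU_NOCB_WAKE_FORCE levels:
 */
enum demo_wake { DEMO_WAKE_NOT, DEMO_WAKE, DEMO_WAKE_FORCE };

enum demo_wake demo_level = DEMO_WAKE_NOT;

void demo_defer(enum demo_wake waketype)
{
	if (demo_level == DEMO_WAKE_NOT) {
		/* First deferral in this episode: arm the one-jiffy timer. */
	}
	if (demo_level < waketype)
		demo_level = waketype;	/* Escalate, never downgrade. */
}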
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)  * However, if there is a callback to be enqueued and if ->nocb_bypass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * proves to be initially empty, just return false because the no-CB GP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  * kthread may need to be awakened in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)  * Note that this function always returns true if rhp is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				     unsigned long j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	struct rcu_cblist rcl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	rcu_lockdep_assert_cblist_protected(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	lockdep_assert_held(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		raw_spin_unlock(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	WRITE_ONCE(rdp->nocb_bypass_first, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	rcu_nocb_bypass_unlock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  * However, if there is a callback to be enqueued and if ->nocb_bypass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  * proves to be initially empty, just return false because the no-CB GP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  * kthread may need to be awakened in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * Note that this function always returns true if rhp is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 				  unsigned long j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	rcu_lockdep_assert_cblist_protected(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	rcu_nocb_bypass_lock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	return rcu_nocb_do_flush_bypass(rdp, rhp, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  * If the ->nocb_bypass_lock is immediately available, flush the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  * ->nocb_bypass queue into ->cblist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	rcu_lockdep_assert_cblist_protected(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (!rcu_segcblist_is_offloaded(&rdp->cblist) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	    !rcu_nocb_bypass_trylock(rdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)  * See whether it is appropriate to use the ->nocb_bypass list in order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  * to control contention on ->nocb_lock.  A limited number of direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)  * is non-empty, further callbacks must be placed into ->nocb_bypass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)  * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)  * back to direct use of ->cblist.  However, ->nocb_bypass should not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)  * used if ->cblist is empty, because otherwise callbacks can be stranded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)  * on ->nocb_bypass because we cannot count on the current CPU ever again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * invoking call_rcu().  The general rule is that if ->nocb_bypass is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * non-empty, the corresponding no-CBs grace-period kthread must not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  * in an indefinite sleep state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * Finally, it is not permitted to use the bypass during early boot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * as doing so would confuse the auto-initialization code.  Besides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * which, there is no point in worrying about lock contention while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  * there is only one CPU in operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 				bool *was_alldone, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	unsigned long c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	unsigned long cur_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	unsigned long j = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		return false; /* Not offloaded, no bypassing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	// Don't use ->nocb_bypass during early boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		rcu_nocb_lock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	// If we have advanced to a new jiffy, reset counts to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	// moving back from ->nocb_bypass to ->cblist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if (j == rdp->nocb_nobypass_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		c = rdp->nocb_nobypass_count + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		WRITE_ONCE(rdp->nocb_nobypass_last, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 				 nocb_nobypass_lim_per_jiffy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		else if (c > nocb_nobypass_lim_per_jiffy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			c = nocb_nobypass_lim_per_jiffy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	WRITE_ONCE(rdp->nocb_nobypass_count, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	// If there haven't yet been all that many ->cblist enqueues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	// ->nocb_bypass first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		rcu_nocb_lock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		if (*was_alldone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 					    TPS("FirstQ"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		return false; // Caller must enqueue the callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	// If ->nocb_bypass has been used too long or is too full,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	// flush ->nocb_bypass to ->cblist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	    ncbs >= qhimark) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		rcu_nocb_lock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 			if (*was_alldone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 						    TPS("FirstQ"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			return false; // Caller must enqueue the callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		if (j != rdp->nocb_gp_adv_time &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 			rdp->nocb_gp_adv_time = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		return true; // Callback already enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	// We need to use the bypass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	rcu_nocb_wait_contended(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	rcu_nocb_bypass_lock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	if (!ncbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		WRITE_ONCE(rdp->nocb_bypass_first, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	rcu_nocb_bypass_unlock(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	smp_mb(); /* Order enqueue before wake. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (ncbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		// No-CBs GP kthread might be indefinitely asleep; if so, wake it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 					    TPS("FirstBQwake"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			__call_rcu_nocb_wake(rdp, true, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 					    TPS("FirstBQnoWake"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	return true; // Callback already enqueued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
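
/*
 * Editor's note: standalone sketch of the wraparound-safe comparison used
 * in the count reset above.  ULONG_CMP_LT(a, b) is defined in
 * include/linux/rcupdate.h as (ULONG_MAX / 2 < (a) - (b)), i.e. "a is
 * behind b" in modular arithmetic, so it stays correct when counters wrap.
 */
#include <limits.h>
#include <stdio.h>

#define DEMO_ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long near_wrap = ULONG_MAX - 1;

	printf("%d\n", DEMO_ULONG_CMP_LT(1UL, 5UL));	/* 1: 1 is behind 5. */
	printf("%d\n", DEMO_ULONG_CMP_LT(5UL, 1UL));	/* 0: 5 is ahead of 1. */
	/* Across the wrap: near_wrap is still "behind" near_wrap + 4. */
	printf("%d\n", DEMO_ULONG_CMP_LT(near_wrap, near_wrap + 4));
	return 0;
}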
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  * Awaken the no-CBs grace-period kthread if needed, either due to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  * legitimately being asleep or due to overload conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * If warranted, also wake up the kthread servicing this CPU's queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 				 unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 				 __releases(rdp->nocb_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	unsigned long cur_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	unsigned long j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	long len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	// If we are being polled or there is no kthread, just leave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	t = READ_ONCE(rdp->nocb_gp_kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (rcu_nocb_poll || !t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 				    TPS("WakeNotPoll"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	// Need to actually do a wakeup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	len = rcu_segcblist_n_cbs(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (was_alldone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		rdp->qlen_last_fqs_check = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		if (!irqs_disabled_flags(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			/* ... if queue was empty ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 			wake_nocb_gp(rdp, false, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 					    TPS("WakeEmpty"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 					   TPS("WakeEmptyIsDeferred"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 			rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		/* ... or if many callbacks queued. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		rdp->qlen_last_fqs_check = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		j = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		if (j != rdp->nocb_gp_adv_time &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			rdp->nocb_gp_adv_time = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		smp_mb(); /* Enqueue before timer_pending(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		if ((rdp->nocb_cb_sleep ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		    !timer_pending(&rdp->nocb_bypass_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 					   TPS("WakeOvfIsDeferred"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	rcu_nocb_lock_irqsave(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	__call_rcu_nocb_wake(rdp, true, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
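
/*
 * Editor's note: from_timer() above is the container_of() idiom: recover
 * the enclosing rcu_data from the address of its embedded timer_list.  A
 * standalone sketch with hypothetical stand-in types:
 */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_timer { int armed; };

struct demo_data {
	int cpu;
	struct demo_timer bypass_timer;	/* Embedded like nocb_bypass_timer. */
};

static void demo_handler(struct demo_timer *t)
{
	struct demo_data *rdp =
		demo_container_of(t, struct demo_data, bypass_timer);

	printf("bypass timer fired for cpu %d\n", rdp->cpu);
}

int main(void)
{
	struct demo_data d = { .cpu = 3 };

	demo_handler(&d.bypass_timer);	/* Prints "... for cpu 3". */
	return 0;
}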
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  * No-CBs GP kthreads come here to wait for additional callbacks to show up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  * or for grace periods to end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) static void nocb_gp_wait(struct rcu_data *my_rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	bool bypass = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	long bypass_ncbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	int __maybe_unused cpu = my_rdp->cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	unsigned long cur_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	bool gotcbs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	unsigned long j = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	bool needwait_gp = false; // This prevents actual uninitialized use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	bool needwake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	bool needwake_gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	struct rcu_node *rnp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	bool wasempty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	 * Each pass through the following loop checks for CBs and for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	 * nearest grace period (if any) to wait for next.  The CB kthreads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	 * and the global grace-period kthread are awakened if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		rcu_nocb_lock_irqsave(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		if (bypass_ncbs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		     bypass_ncbs > 2 * qhimark)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			// Bypass full or old, so flush it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 			(void)rcu_nocb_try_flush_bypass(rdp, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 			rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			continue; /* No callbacks here, try next. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		if (bypass_ncbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 					    TPS("Bypass"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			bypass = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		if (bypass) {  // Avoid race with first bypass CB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			WRITE_ONCE(my_rdp->nocb_defer_wakeup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 				   RCU_NOCB_WAKE_NOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			del_timer(&my_rdp->nocb_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		// Advance callbacks if helpful and low contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		needwake_gp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		if (!rcu_segcblist_restempty(&rdp->cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 					     RCU_NEXT_READY_TAIL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			needwake_gp = rcu_advance_cbs(rnp, rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			wasempty = rcu_segcblist_restempty(&rdp->cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 							   RCU_NEXT_READY_TAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		// Need to wait on some grace period?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		WARN_ON_ONCE(wasempty &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			     !rcu_segcblist_restempty(&rdp->cblist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 						      RCU_NEXT_READY_TAIL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			if (!needwait_gp ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 				wait_gp_seq = cur_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 			needwait_gp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 					    TPS("NeedWaitGP"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			needwake = rdp->nocb_cb_sleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 			WRITE_ONCE(rdp->nocb_cb_sleep, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			smp_mb(); /* CB invocation -after- GP end. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			needwake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		if (needwake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			swake_up_one(&rdp->nocb_cb_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 			gotcbs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		if (needwake_gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			rcu_gp_kthread_wake();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	my_rdp->nocb_gp_bypass = bypass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	my_rdp->nocb_gp_gp = needwait_gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	if (bypass && !rcu_nocb_poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		// At least one child with non-empty ->nocb_bypass, so set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		// timer in order to avoid stranding its callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	if (rcu_nocb_poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		/* Polling, so trace if first poll in the series. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		if (gotcbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		schedule_timeout_idle(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	} else if (!needwait_gp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		/* Wait for callbacks to appear. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 				!READ_ONCE(my_rdp->nocb_gp_sleep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		rnp = my_rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		swait_event_interruptible_exclusive(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			!READ_ONCE(my_rdp->nocb_gp_sleep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (!rcu_nocb_poll) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		if (bypass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			del_timer(&my_rdp->nocb_bypass_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	my_rdp->nocb_gp_seq = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	WARN_ON(signal_pending(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)  * No-CBs grace-period-wait kthread.  There is one of these per group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)  * of CPUs, but it is spawned only after at least one CPU in that group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * has come online since boot.  This kthread checks for newly posted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  * callbacks from any of the CPUs it is responsible for, waits for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * that then have callback-invocation work to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static int rcu_nocb_gp_kthread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	struct rcu_data *rdp = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		nocb_gp_wait(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		cond_resched_tasks_rcu_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)  * Invoke any ready callbacks from the corresponding no-CBs CPU,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)  * then, if there are no more, wait for more to appear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) static void nocb_cb_wait(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	unsigned long cur_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	bool needwake_gp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	struct rcu_node *rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	rcu_momentary_dyntick_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	rcu_do_batch(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	lockdep_assert_irqs_enabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	rcu_nocb_lock_irqsave(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		if (needwake_gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			rcu_gp_kthread_wake();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	WRITE_ONCE(rdp->nocb_cb_sleep, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	if (needwake_gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		rcu_gp_kthread_wake();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 				 !READ_ONCE(rdp->nocb_cb_sleep));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		/* ^^^ Ensure CB invocation follows _sleep test. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	WARN_ON(signal_pending(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * nocb_cb_wait() to do the dirty work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) static int rcu_nocb_cb_kthread(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	struct rcu_data *rdp = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	// Each pass through this loop does one callback batch, and,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	// if there are no more ready callbacks, waits for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		nocb_cb_wait(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		cond_resched_tasks_rcu_qs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* Is a deferred wakeup of rcu_nocb_kthread() required? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	return READ_ONCE(rdp->nocb_defer_wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) /* Do a deferred wakeup of rcu_nocb_kthread(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	int ndw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	rcu_nocb_lock_irqsave(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		rcu_nocb_unlock_irqrestore(rdp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	do_nocb_deferred_wakeup_common(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)  * Do a deferred wakeup of rcu_nocb_kthread() from the fastpath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)  * This is an inexact, lock-free common-case check.  Note that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)  * we miss, ->nocb_timer will eventually clean things up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	if (rcu_nocb_need_deferred_wakeup(rdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		do_nocb_deferred_wakeup_common(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) void rcu_nocb_flush_deferred_wakeup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
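/*
 * Both entry points above -- the ->nocb_timer handler and the
 * do_nocb_deferred_wakeup() fastpath -- funnel into
 * do_nocb_deferred_wakeup_common(), which re-checks the condition under
 * ->nocb_lock.  A minimal userspace sketch of that check/lock/re-check
 * shape (all names below are invented for illustration):
 */
#include <pthread.h>
#include <stdio.h>

struct deferred {
	pthread_mutex_t lock;
	int pending;			/* analog of ->nocb_defer_wakeup */
};

static void flush_deferred(struct deferred *d)
{
	if (!d->pending)		/* unlocked fast check, may race */
		return;
	pthread_mutex_lock(&d->lock);
	if (d->pending) {		/* authoritative re-check, locked */
		d->pending = 0;
		printf("doing the deferred wakeup\n");
	}
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct deferred d = { PTHREAD_MUTEX_INITIALIZER, 1 };

	flush_deferred(&d);		/* performs the deferred work */
	flush_deferred(&d);		/* fast path: nothing to do */
	return 0;
}
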
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) void __init rcu_init_nohz(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	bool need_rcu_nocb_mask = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) #if defined(CONFIG_NO_HZ_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		need_rcu_nocb_mask = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) #endif /* #if defined(CONFIG_NO_HZ_FULL) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	if (!cpumask_available(rcu_nocb_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) #if defined(CONFIG_NO_HZ_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	if (tick_nohz_full_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) #endif /* #if defined(CONFIG_NO_HZ_FULL) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			    rcu_nocb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	if (cpumask_empty(rcu_nocb_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 			cpumask_pr_args(rcu_nocb_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	if (rcu_nocb_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	for_each_cpu(cpu, rcu_nocb_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		if (rcu_segcblist_empty(&rdp->cblist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			rcu_segcblist_init(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		rcu_segcblist_offload(&rdp->cblist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	rcu_organize_nocb_kthreads();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
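/*
 * Usage note (example command line, not part of this file): offloading
 * is normally requested at boot, e.g. on an eight-core board:
 *
 *	rcu_nocbs=1-7 nohz_full=1-7
 *
 * As rcu_init_nohz() above shows, tick_nohz_full_mask is OR-ed into
 * rcu_nocb_mask, so nohz_full= CPUs are offloaded even without an
 * explicit rcu_nocbs= setting.
 */
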
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /* Initialize per-rcu_data variables for no-CBs CPUs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	init_swait_queue_head(&rdp->nocb_cb_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	init_swait_queue_head(&rdp->nocb_gp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	raw_spin_lock_init(&rdp->nocb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	raw_spin_lock_init(&rdp->nocb_bypass_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	raw_spin_lock_init(&rdp->nocb_gp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	rcu_cblist_init(&rdp->nocb_bypass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)  * If the specified CPU is a no-CBs CPU that does not already have its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)  * for this CPU's group has not yet been created, spawn it as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) static void rcu_spawn_one_nocb_kthread(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	struct rcu_data *rdp_gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	 * then nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	/* If we didn't spawn the GP kthread first, reorganize! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	rdp_gp = rdp->nocb_gp_rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	if (!rdp_gp->nocb_gp_kthread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 				"rcuog/%d", rdp_gp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	/* Spawn the kthread for this CPU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	t = kthread_run(rcu_nocb_cb_kthread, rdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			"rcuo%c/%d", rcu_state.abbr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
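/*
 * Note on the error handling above: kthread_run() returns an ERR_PTR()
 * value on failure, never NULL, which is why the result is tested with
 * IS_ERR().  The usual shape (sketch):
 *
 *	t = kthread_run(fn, data, "name/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *
 * Here a WARN_ONCE() splat is issued instead of returning an error,
 * since rcu_spawn_one_nocb_kthread() has no caller to propagate it to.
 */
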
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)  * If the specified CPU is a no-CBs CPU that does not already have its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)  * rcuo kthread, spawn it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static void rcu_spawn_cpu_nocb_kthread(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	if (rcu_scheduler_fully_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		rcu_spawn_one_nocb_kthread(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)  * Once the scheduler is running, spawn rcuo kthreads for all online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * non-boot CPUs come online -- if this changes, we will need to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)  * some mutual exclusion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static void __init rcu_spawn_nocb_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	for_each_online_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		rcu_spawn_cpu_nocb_kthread(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) static int rcu_nocb_gp_stride = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) module_param(rcu_nocb_gp_stride, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  * Initialize GP-CB relationships for all no-CBs CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static void __init rcu_organize_nocb_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	bool firsttime = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	bool gotnocbs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	bool gotnocbscbs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	int ls = rcu_nocb_gp_stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	int nl = 0;  /* Next GP kthread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	struct rcu_data *rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	struct rcu_data *rdp_prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	if (!cpumask_available(rcu_nocb_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	if (ls == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		rcu_nocb_gp_stride = ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	 * Each pass through this loop sets up one rcu_data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	 * Should the corresponding CPU come online in the future, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	for_each_cpu(cpu, rcu_nocb_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		rdp = per_cpu_ptr(&rcu_data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		if (rdp->cpu >= nl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			/* New GP kthread, set up for CBs & next GP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			gotnocbs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 			rdp->nocb_gp_rdp = rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			rdp_gp = rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			if (dump_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 				if (!firsttime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 					pr_cont("%s\n", gotnocbscbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 							? "" : " (self only)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 				gotnocbscbs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 				firsttime = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 				pr_alert("%s: No-CB GP kthread CPU %d:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 					 __func__, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 			/* Another CB kthread, link to previous GP kthread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 			gotnocbscbs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			rdp->nocb_gp_rdp = rdp_gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			rdp_prev->nocb_next_cb_rdp = rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 			if (dump_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 				pr_cont(" %d", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		rdp_prev = rdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	if (gotnocbs && dump_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
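/*
 * Worked example (sketch, not kernel code): with nr_cpu_ids = 8 and all
 * CPUs in rcu_nocb_mask, the default stride is ls = nr_cpu_ids /
 * int_sqrt(nr_cpu_ids) = 8 / 2 = 4, so rcu_organize_nocb_kthreads()
 * makes CPUs 0 and 4 GP-kthread leaders and links CPUs 1-3 and 5-7
 * behind them.  The stride can be overridden via the rcu_nocb_gp_stride
 * parameter declared above.  A standalone demo of the same arithmetic:
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	int nr_cpu_ids = 8;
	int ls = nr_cpu_ids / (int)sqrt(nr_cpu_ids); /* int_sqrt() analog */
	int nl = 0;

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {
			/* DIV_ROUND_UP(cpu + 1, ls) * ls, open-coded. */
			nl = ((cpu + 1 + ls - 1) / ls) * ls;
			printf("CPU %d: GP leader, next at >= %d\n", cpu, nl);
		} else {
			printf("CPU %d: CB only, follows prior leader\n", cpu);
		}
	}
	return 0;
}
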
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)  * Bind the current task to the offloaded CPUs.  If there are no offloaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)  * CPUs, leave the task unbound.  Splat if the bind attempt fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) void rcu_bind_current_to_nocb(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)  * Dump out nocb grace-period kthread state for the specified rcu_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)  * structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	struct rcu_node *rnp = rdp->mynode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		"kK"[!!rdp->nocb_gp_kthread],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		"dD"[!!rdp->nocb_defer_wakeup],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		"tT"[timer_pending(&rdp->nocb_timer)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		"bB"[timer_pending(&rdp->nocb_bypass_timer)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		"sS"[!!rdp->nocb_gp_sleep],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		".W"[swait_active(&rdp->nocb_gp_wq)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		".W"[swait_active(&rnp->nocb_gp_wq[0])],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		".W"[swait_active(&rnp->nocb_gp_wq[1])],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		".B"[!!rdp->nocb_gp_bypass],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		".G"[!!rdp->nocb_gp_gp],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		(long)rdp->nocb_gp_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
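/*
 * Decoding the dump above: each "xX"[cond] expression indexes a
 * two-character string literal, yielding the uppercase letter when the
 * condition is true and the lowercase (or '.') one when it is false.
 * A standalone illustration (sketch):
 */
#include <stdio.h>

int main(void)
{
	int sleeping = 1;
	int locked = 0;

	/* Prints "Sl": 'S' since sleeping is set, 'l' since not locked. */
	printf("%c%c\n", "sS"[!!sleeping], "lL"[!!locked]);
	return 0;
}
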
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) /* Dump out nocb kthread state for the specified rcu_data structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static void show_rcu_nocb_state(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	struct rcu_segcblist *rsclp = &rdp->cblist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	bool waslocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	bool wastimer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	bool wassleep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	if (rdp->nocb_gp_rdp == rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		show_rcu_nocb_gp_state(rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	pr_info("   CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		"kK"[!!rdp->nocb_cb_kthread],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		"sS"[!!rdp->nocb_cb_sleep],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		".W"[swait_active(&rdp->nocb_cb_wq)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		jiffies - rdp->nocb_bypass_first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		jiffies - rdp->nocb_nobypass_last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		rdp->nocb_nobypass_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		".D"[rcu_segcblist_ready_cbs(rsclp)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		rcu_segcblist_n_cbs(&rdp->cblist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	/* It is OK for GP kthreads to have GP state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	if (rdp->nocb_gp_rdp == rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 	wastimer = timer_pending(&rdp->nocb_bypass_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	wassleep = swait_active(&rdp->nocb_gp_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		return;  /* Nothing untoward. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		"lL"[waslocked],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		"dD"[!!rdp->nocb_defer_wakeup],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		"tT"[wastimer],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		"sS"[!!rdp->nocb_gp_sleep],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		".W"[wassleep]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) #else /* #ifdef CONFIG_RCU_NOCB_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) /* No ->nocb_lock to acquire.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) static void rcu_nocb_lock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /* No ->nocb_lock to release.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) static void rcu_nocb_unlock(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) /* No ->nocb_lock to release.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 				       unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) /* Lockdep check that ->cblist may be safely accessed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	lockdep_assert_irqs_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) static void rcu_init_one_nocb(struct rcu_node *rnp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 				  unsigned long j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 				bool *was_alldone, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 				 unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	WARN_ON_ONCE(1);  /* Should be dead code! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) static void rcu_spawn_cpu_nocb_kthread(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) static void __init rcu_spawn_nocb_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) static void show_rcu_nocb_state(struct rcu_data *rdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
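/*
 * Note on the stub block above: with CONFIG_RCU_NOCB_CPU=n, every
 * offload hook compiles down to an empty function, so callers in the
 * rest of the tree need no #ifdefs of their own.  The general shape
 * (CONFIG_FOO and foo_hook() are invented for this sketch):
 *
 *	#ifdef CONFIG_FOO
 *	static void foo_hook(struct s *p) { ... real work ... }
 *	#else
 *	static void foo_hook(struct s *p) { }
 *	#endif
 */
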
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)  * grace-period kthread will do force_quiescent_state() processing?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)  * The idea is to avoid waking up RCU core processing on such a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)  * CPU unless the grace period has extended for too long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)  * This code relies on the fact that all NO_HZ_FULL CPUs are also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)  * CONFIG_RCU_NOCB_CPU CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) static bool rcu_nohz_full_cpu(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) #ifdef CONFIG_NO_HZ_FULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	if (tick_nohz_full_cpu(smp_processor_id()) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	    (!rcu_gp_in_progress() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) #endif /* #ifdef CONFIG_NO_HZ_FULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
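/*
 * Side note on the time_before() test above: jiffies wraps, so the
 * comparison uses signed subtraction rather than a raw '<'.  From
 * include/linux/jiffies.h, time_before(a, b) is essentially:
 *
 *	((long)((a) - (b)) < 0)
 *
 * which remains correct across the wrap as long as the two timestamps
 * are within LONG_MAX jiffies of each other.
 */
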
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)  * Bind the RCU grace-period kthreads to the housekeeping CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static void rcu_bind_gp_kthread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	if (!tick_nohz_full_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	housekeeping_affine(current, HK_FLAG_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /* Record the current task on dyntick-idle entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) static __always_inline void rcu_dynticks_task_enter(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /* Record no current task on dyntick-idle exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static __always_inline void rcu_dynticks_task_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) static __always_inline void rcu_dynticks_task_trace_enter(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) #ifdef CONFIG_TASKS_TRACE_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		current->trc_reader_special.b.need_mb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) static __always_inline void rcu_dynticks_task_trace_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) #ifdef CONFIG_TASKS_TRACE_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		current->trc_reader_special.b.need_mb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) }