Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
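
/*
 * Because of the MODULE_PARAM_PREFIX above, these knobs appear under the
 * "rcupdate." prefix.  A hedged example of setting them on the kernel
 * command line (the values shown are illustrative, not recommendations):
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 */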

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret:	Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (i.e.,
 * in the section between rcu_idle_enter() and rcu_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, other CPUs that started a
 * grace period can notice our extended quiescent state.  Otherwise we
 * would delay any grace period as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
	if (!debug_lockdep_rcu_enabled()) {
		*ret = true;
		return true;
	}
	if (!rcu_is_watching()) {
		*ret = false;
		return true;
	}
	if (!rcu_lockdep_current_cpu_online()) {
		*ret = false;
		return true;
	}
	return false;
}

int rcu_read_lock_sched_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
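
/*
 * A minimal usage sketch (the function name is hypothetical): helpers that
 * must run with preemption disabled or under rcu_read_lock_sched() can use
 * this predicate in a lockdep assertion:
 *
 *	static void my_sched_protected_helper(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *				 "my_sched_protected_helper() outside RCU-sched reader");
 *		...
 *	}
 */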

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the period of boot between the time
 * the first task is spawned and the time the rcu_set_runtime_mode()
 * core_initcall() is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
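
/*
 * Sketch of the intended pairing: because rcu_expedited_nesting is a
 * counter rather than a flag, callers bracket latency-sensitive regions
 * and may nest freely (hypothetical caller):
 *
 *	rcu_expedite_gp();
 *	synchronize_rcu();	// behaves like synchronize_rcu_expedited()
 *	rcu_unexpedite_gp();
 */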

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
	rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
	return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	synchronize_rcu();
	synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	kfree_rcu_scheduler_running();
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
	.name = "rcu_read_lock",
	.key = &rcu_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* XXX PREEMPT_RCU ? */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
	.name = "rcu_read_lock_bh",
	.key = &rcu_bh_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_LOCK also makes BH preemptible */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
	.name = "rcu_read_lock_sched",
	.key = &rcu_sched_lock_key,
	.wait_type_outer = LD_WAIT_FREE,
	.wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
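
/*
 * A common consumer is rcu_dereference_check(), which accepts this
 * predicate as part of its condition.  A hedged sketch (the pointer and
 * lock names are hypothetical):
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_update_lock));
 */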

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU case and the !CONFIG_PROVE_RCU case.  Note that if
 * someone uses rcu_read_lock_bh(), but then later enables BH, lockdep
 * (if enabled) will show the situation.  This is useful for debug checks
 * in functions that require that they be called within an RCU read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
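
/*
 * Usage sketch for the _bh flavor (the pointer name is hypothetical):
 * networking-style code that reads under rcu_read_lock_bh():
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(dev_map);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock_bh();
 *
 * rcu_dereference_bh() uses rcu_read_lock_bh_held() as its lockdep check.
 */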

int rcu_read_lock_any_held(void)
{
	bool ret;

	if (rcu_read_lock_held_common(&ret))
		return ret;
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
		return 1;
	return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
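
/*
 * This callback is the building block for the synchronous wait pattern
 * below: queue an on-stack rcu_synchronize, then block on its completion.
 * A minimal sketch of that pattern (essentially what __wait_rcu_gp()
 * does for each distinct flavor):
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_rcu_head_on_stack(&rcu.head);
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 *	destroy_rcu_head_on_stack(&rcu.head);
 */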

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;
	int j;

	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu)) {
			/* Tiny RCU: synchronize_rcu() is nearly free, so skip the callback. */
			might_sleep();
			continue;
		}
		/* Skip flavors already registered by an earlier element. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			init_rcu_head_on_stack(&rs_array[i].head);
			init_completion(&rs_array[i].completion);
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
		}
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu))
			continue;
		/* Wait only on the first occurrence of each flavor. */
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
				break;
		if (j == i) {
			wait_for_completion(&rs_array[i].completion);
			destroy_rcu_head_on_stack(&rs_array[i].head);
		}
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
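
/*
 * Callers do not usually invoke __wait_rcu_gp() directly; in this kernel's
 * include/linux/rcupdate_wait.h it is wrapped by helper macros
 * (wait_rcu_gp(), synchronize_rcu_mult()) that build the arrays on the
 * stack, along the lines of:
 *
 *	synchronize_rcu_mult(call_rcu, call_rcu_tasks);
 *
 * which waits for both an RCU and an RCU-tasks grace period, eliding
 * duplicate flavors via the j == i checks above.
 */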

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

const struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	int ret;

	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
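
/*
 * These stall knobs also live under the "rcupdate." prefix and are
 * writable at run time (mode 0644).  An illustrative example, raising
 * the stall timeout to 60 seconds from the command line or via sysfs:
 *
 *	rcupdate.rcu_cpu_stall_timeout=60
 *	echo 60 > /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout
 */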

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);

struct early_boot_kfree_rcu {
	struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;
	static struct rcu_head shead;
	struct early_boot_kfree_rcu *rhp;

	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	if (!WARN_ON_ONCE(!rhp))
		kfree_rcu(rhp, rh);
}
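
/*
 * The kfree_rcu() call above is the general embedded-rcu_head pattern:
 * place a struct rcu_head next to the payload and pass its field name,
 * and the object is kfree()d after a grace period with no callback
 * function needed.  A hedged sketch with a hypothetical payload type
 * and pointer/lock names:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	old = rcu_replace_pointer(gp, new, lockdep_is_held(&gp_lock));
 *	kfree_rcu(old, rh);
 */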

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
		}
	}
	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
	if (rcu_normal)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */