Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
/* srcu_structs whose grace periods were requested before workqueues existed. */
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;	/* empty list: tail points at head */
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);	/* for parking on srcu_boot_list */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
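
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the usual pairing of init_srcu_struct() and cleanup_srcu_struct() for a
 * dynamically allocated object.  "struct my_obj" and both helpers are
 * hypothetical names; kzalloc()/kfree() come from <linux/slab.h>.
 */
struct my_obj {
	struct srcu_struct srcu;
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *op = kzalloc(sizeof(*op), GFP_KERNEL);

	if (op && init_srcu_struct(&op->srcu)) {	/* must precede all other use */
		kfree(op);
		return NULL;
	}
	return op;
}

static void my_obj_destroy(struct my_obj *op)
{
	cleanup_srcu_struct(&op->srcu);	/* all readers and callbacks must be done */
	kfree(op);
}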

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = ssp->srcu_lock_nesting[idx] - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
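
/*
 * Illustrative sketch (editorial addition): a reader-side critical section.
 * srcu_read_lock() returns the index chosen by __srcu_read_lock() (a static
 * inline in srcutiny.h) and srcu_read_unlock() passes it back to the
 * function above.  "my_srcu", "gp" and "struct foo" are hypothetical names.
 */
struct foo {
	int data;
	struct rcu_head rh;
};

static struct foo __rcu *gp;
DEFINE_SRCU(my_srcu);

static int reader(void)
{
	int idx, val = -1;
	struct foo *p;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(gp, &my_srcu);	/* readers may sleep while using p */
	if (p)
		val = p->data;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}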

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;	/* counter used by pre-existing readers */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
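
/*
 * Worked example (editorial note): suppose ->srcu_idx is 0 when
 * srcu_drive_gp() starts.  Then idx = (0 & 0x2) / 2 = 0, the counter that
 * pre-existing readers incremented.  Bumping ->srcu_idx to 1 makes new
 * readers use ->srcu_lock_nesting[1], because the tiny __srcu_read_lock()
 * picks ((->srcu_idx + 1) & 0x2) >> 1.  Once counter 0 drains, the second
 * bump to 2 records completion, so ->srcu_idx advances by two per grace
 * period and its 0x2 bit alternates which counter each period must drain.
 */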

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned short cookie;

	cookie = get_state_synchronize_srcu(ssp);
	if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;	/* A grace period covering this request is already queued. */
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);	/* defer to srcu_init() */
	}
}
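
/*
 * Editorial note: USHORT_CMP_GE()/USHORT_CMP_LT() (from rcu.h) compare
 * 16-bit grace-period counters modulo 2^16, along the lines of
 * USHRT_MAX / 2 >= (unsigned short)((a) - (b)), so the checks above and in
 * srcu_drive_gp() stay correct when ->srcu_idx wraps: 0x0001 still compares
 * as "ahead of" 0xffff.
 */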

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);	/* callbacks may also be queued from irq context */
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
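
/*
 * Illustrative sketch (editorial addition): the usual call_srcu() pattern,
 * embedding the rcu_head in the protected object (struct foo from the
 * reader sketch) and recovering it with container_of() in the callback.
 */
static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);	/* no reader in my_srcu's domain can still reference fp */
}

/* Called after fp has been unpublished from all reader-visible pointers. */
static void foo_retire(struct foo *fp)
{
	call_srcu(&my_srcu, &fp->rh, foo_reclaim);
}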

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
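
/*
 * Illustrative sketch (editorial addition): a synchronous update using the
 * hypothetical gp/my_srcu from the earlier sketches.  The writer unpublishes
 * the old object, waits out all pre-existing readers in this SRCU domain,
 * then frees it directly instead of going through call_srcu().
 */
static void writer_replace(struct foo *newp)
{
	struct foo *oldp;

	oldp = rcu_dereference_protected(gp, 1);	/* caller serializes writers */
	rcu_assign_pointer(gp, newp);
	synchronize_srcu(&my_srcu);	/* may sleep; never call from a reader */
	kfree(oldp);
}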

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret & USHRT_MAX;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
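
/*
 * Worked example (editorial note): with ->srcu_idx idle at 4, the cookie is
 * (4 + 3) & ~0x1 = 6, the even value ->srcu_idx reaches after one more full
 * grace period.  With a grace period in flight at ->srcu_idx == 5, the
 * cookie is (5 + 3) & ~0x1 = 8: the in-flight grace period cannot cover a
 * new request, so the *next* one must also complete.  The & USHRT_MAX
 * truncation keeps the cookie within the 16-bit arithmetic that
 * USHORT_CMP_GE() expects.
 */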

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that the poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret = get_state_synchronize_srcu(ssp);

	srcu_gp_start_if_needed(ssp);
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);

	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
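
/*
 * Illustrative sketch (editorial addition): the polled grace-period API.
 * The writer stashes a cookie at unpublish time and defers the free until a
 * later poll sees the grace period complete; all names reuse the earlier
 * hypothetical gp/my_srcu/struct foo.
 */
static struct foo *foo_old;
static unsigned long foo_gp_cookie;

static void writer_unpublish(void)
{
	foo_old = rcu_dereference_protected(gp, 1);
	rcu_assign_pointer(gp, NULL);
	foo_gp_cookie = start_poll_synchronize_srcu(&my_srcu);	/* kicks off a GP */
}

static void writer_maybe_free(void)
{
	if (foo_old && poll_state_synchronize_srcu(&my_srcu, foo_gp_cookie)) {
		kfree(foo_old);	/* grace period over; no reader still holds it */
		foo_old = NULL;
	}
}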

/* Lockdep diagnostics.  */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				      struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}
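
/*
 * Illustrative sketch (editorial addition): an srcu_struct used before
 * srcu_init() runs must be statically allocated, e.g. with
 * DEFINE_STATIC_SRCU().  A call_srcu() issued before workqueues exist then
 * parks the work on srcu_boot_list, and srcu_init() above requeues it.
 * "early_srcu" and the helpers are hypothetical.
 */
DEFINE_STATIC_SRCU(early_srcu);

static struct rcu_head early_rh;

static void early_cb(struct rcu_head *rhp)
{
	/* Runs only once workqueues are up and srcu_boot_list is drained. */
}

static void early_boot_user(void)
{
	call_srcu(&early_srcu, &early_rh, early_cb);	/* safe pre-workqueue */
}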