Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

kernel/rcu/rcu_sync.c — blame: every line from commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
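
/*
 * Editorial note (not in the original source): the gp_state values above form
 * a small state machine driven by rcu_sync_enter(), rcu_sync_exit() and the
 * rcu_sync_func() callback defined below:
 *
 *	GP_IDLE   -- rcu_sync_enter() ------------------------> GP_ENTER
 *	GP_ENTER  -- grace period elapses ---------------------> GP_PASSED
 *	GP_PASSED -- last rcu_sync_exit() ---------------------> GP_EXIT
 *	GP_EXIT   -- grace period elapses, no writers left ----> GP_IDLE
 *	GP_EXIT   -- grace period elapses, new writer present -> GP_PASSED
 *	GP_EXIT   -- enter()/exit() pair before the GP ends ---> GP_REPLAY
 *	GP_REPLAY -- grace period elapses (callback requeued) -> GP_EXIT
 */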

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
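
/*
 * Editorial sketch (not part of the original file): a minimal, hypothetical
 * early-boot caller of the two functions above.  The names example_rss and
 * example_subsys_init() are invented for illustration; the point is only
 * that rcu_sync_enter_start() follows rcu_sync_init() and runs before any
 * reader looks at rcu_sync_is_idle().
 */
#if 0	/* illustration only */
static struct rcu_sync example_rss;

static void __init example_subsys_init(void)
{
	rcu_sync_init(&example_rss);
	/* Keep readers on their slow path from the very beginning. */
	rcu_sync_enter_start(&example_rss);
}
#endif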


static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write-side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback if
		 * we are called at early boot time, but this shouldn't happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above: this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
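
/*
 * Editorial sketch (not part of the original file): how an updater and a
 * reader typically pair up around an rcu_sync instance, in the style of the
 * percpu-rwsem fast/slow path split.  The names example_write_side() and
 * example_read_side() are invented; example_rss is the instance declared in
 * the sketch after rcu_sync_enter_start() above.  Only rcu_sync_enter(),
 * rcu_sync_exit() and rcu_sync_is_idle() (from <linux/rcu_sync.h>) belong to
 * this API.
 */
#if 0	/* illustration only */
static void example_write_side(void)
{
	rcu_sync_enter(&example_rss);	/* push readers onto the slowpath */
	/* ... perform the update while readers use the slow path ... */
	rcu_sync_exit(&example_rss);	/* readers regain fastpath after a GP */
}

static void example_read_side(void)
{
	bool fast;

	rcu_read_lock();
	/* Must be called inside an RCU read-side critical section. */
	fast = rcu_sync_is_idle(&example_rss);
	if (fast) {
		/*
		 * Lock-free fastpath: any new writer entering now must wait
		 * for this RCU read-side section to end before proceeding.
		 */
	}
	rcu_read_unlock();

	if (!fast) {
		/* ... fall back to the writer-aware slowpath ... */
	}
}
#endif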

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
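
/*
 * Editorial sketch (not part of the original file): teardown ordering for
 * rcu_sync_dtor().  example_subsys_teardown() and example_rss are invented
 * names.  rcu_sync_dtor() may block in rcu_barrier(), so it must run after
 * every rcu_sync_enter() has been matched by an rcu_sync_exit() and before
 * the memory holding the structure is freed or reused.
 */
#if 0	/* illustration only */
static void example_subsys_teardown(void)
{
	/* All writers are done: gp_count is zero at this point. */
	rcu_sync_dtor(&example_rss);	/* waits for any pending callback */
	/* Now the memory backing example_rss may be freed or reused. */
}
#endif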