// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};

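/*
 * Illustrative sketch of the tail-pointer idiom (cb1 is a hypothetical
 * queued callback): because ->donetail and ->curtail reference
 * "struct rcu_head *" slots rather than list elements, the empty list
 * needs no special case:
 *
 *	empty:		rcucblist = NULL; donetail = curtail = &rcucblist;
 *	one CB queued:	rcucblist = cb1;  curtail = &cb1->next;
 *
 * Everything from ->rcucblist up to the slot ->donetail points at has
 * waited out a grace period; the rest, up to ->curtail, has not.
 */
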
/* Wait for all previously queued callbacks: post one more and wait for it. */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}

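/*
 * Illustrative sketch (cb1 and cb2 are hypothetical callbacks): a
 * quiescent state promotes every queued callback to "done" simply by
 * advancing ->donetail to ->curtail, with no per-callback work:
 *
 *	before:	rcucblist -> cb1 -> cb2; donetail = &cb1->next;
 *	after:	donetail = curtail = &cb2->next; both CBs are ready.
 */
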
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it. This function must
 * be called from hardirq context. It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}

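/*
 * Illustrative sketch of the offset encoding tested above (struct foo
 * and example_free() are hypothetical): kfree_rcu() stores the byte
 * offset of the rcu_head within its enclosing structure in ->func, so
 * no callback function need be invoked to free the memory:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void example_free(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);	// ->func becomes offsetof(struct foo, rh)
 *	}
 */
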
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

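/*
 * Illustrative sketch (cb1 and cb2 are hypothetical callbacks): the
 * splice above detaches only the "done" segment of the list, leaving
 * still-waiting callbacks queued:
 *
 *	before:	rcucblist -> cb1 -> cb2; donetail = &cb1->next;
 *	after:	list = cb1 (cb1->next == NULL); rcucblist -> cb2.
 *
 * Interrupts remain disabled across the splice so that a call_rcu()
 * from an interrupt handler cannot observe a half-updated list.
 */
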
/*
 * Wait for a grace period to elapse. But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh? (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

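/*
 * Illustrative usage sketch (gp, mylock, newp, and oldp are
 * hypothetical): the classic update-side pattern publishes the new
 * version, waits out pre-existing readers, then frees the old one:
 *
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp, newp);	// readers now see newp
 *	synchronize_rcu();		// wait for pre-existing readers
 *	kfree(oldp);			// no reader can still hold oldp
 */
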
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period. But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);

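/*
 * Illustrative usage sketch (struct foo and foo_reclaim() are
 * hypothetical): callers embed an rcu_head in their structure and let
 * the callback free it once the grace period has elapsed, without
 * blocking the way synchronize_rcu() does:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&fp->rh, foo_reclaim);
 */
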
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}