/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callbacks to get the kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}
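
/*
 * Illustration (hypothetical flavor, not compiled anywhere): a new flavor
 * named "rcu_tasks_example" would be created with
 *
 *	void call_rcu_tasks_example(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks_example, rcu_tasks_example_wait_gp,
 *			 call_rcu_tasks_example, "RCU Tasks Example");
 *
 * which expands to a statically initialized rcu_tasks structure whose
 * callback list is empty (->cbs_tail pointing at ->cbs_head) and whose
 * grace-period kthread has not yet been spawned.  The real flavors below
 * (rcu_tasks, rcu_tasks_rude, and rcu_tasks_trace) follow this pattern.
 */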

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
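
/*
 * Illustration: because this file is included from kernel/rcu/update.c,
 * the two module parameters above normally appear under the "rcupdate."
 * prefix.  For example, asking for a stall report after one minute
 * (assuming HZ=1000) would typically be done by booting with
 *
 *	rcupdate.rcu_task_stall_timeout=60000
 *
 * or by writing to /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 * at run time.
 */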

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down. ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
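
/*
 * For illustration, the line printed above might look like this (all
 * field values made up):
 *
 *	rcu_tasks: RTGS_WAIT_CBS(11) since 2 g:15 i:0/4 k.
 *
 * "k" indicates that the grace-period kthread has been spawned, the
 * following "." would instead read "C" if callbacks were queued, and any
 * trailing text is the flavor-specific state passed in via @s.
 */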
#endif /* #ifndef CONFIG_TINY_RCU */

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period. Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked. Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts. When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;
	if (fract > HZ)
		fract = HZ;

	for (;;) {
		bool firstreport;
		bool needreport;
		int rtst;

		if (list_empty(&holdouts))
			break;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(HZ/fract);

		if (fract > 1)
			fract--;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time. There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs. If this is required, per-CPU callback lists
// will be needed.
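
/*
 * For illustration, the canonical use of this flavor is freeing a tracing
 * trampoline (or similar dynamically generated code) that some task might
 * still be executing without ever doing a voluntary context switch.  A
 * hypothetical sketch, with unhook_trampoline() and free_trampoline()
 * standing in for the caller's own helpers:
 *
 *	unhook_trampoline(tramp);	// no new calls can enter the code
 *	synchronize_rcu_tasks();	// every task has passed a safe state
 *	free_trampoline(tramp);		// now safe to release the memory
 *
 * Note that the "readers" here need no markers at all; merely running
 * between voluntary context switches makes a task something this grace
 * period must wait for.
 */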

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete. Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled. Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting. This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}
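
/*
 * For illustration, a stall report produced above might look roughly like
 * the following (pointer and counts made up):
 *
 *	INFO: rcu_tasks detected stalls on tasks:
 *	00000000abcd1234: .N nvcsw: 3/3 holdout: 1 idle_cpu: -1/2
 *
 * Here "." means the task is not an idle task, "N" means its CPU is
 * nohz_full, and identical nvcsw counts mean the task has not done a
 * voluntary context switch since the grace period began.
 */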

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution. As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
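
/*
 * For illustration, asynchronous use mirrors call_rcu(): embed an rcu_head
 * in the structure being protected and free it from the callback.  A
 * hypothetical sketch (the structure and helpers are illustrative only):
 *
 *	struct tramp_page {
 *		void *text;
 *		struct rcu_head rh;
 *	};
 *
 *	static void tramp_page_free_cb(struct rcu_head *rhp)
 *	{
 *		struct tramp_page *tp = container_of(rhp, struct tramp_page, rh);
 *
 *		vfree(tp->text);	// assumes ->text was vmalloc()ed
 *		kfree(tp);
 *	}
 *
 *	// After making tp unreachable to new callers:
 *	call_rcu_tasks(&tp->rh, tramp_page_free_cb);
 */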

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks. The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}
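
/*
 * For illustration, these two hooks bracket the late stages of task exit
 * (their callers live in the exit path, not in this file), so that the
 * synchronize_srcu(&tasks_rcu_exit_srcu) in rcu_tasks_postscan() waits for
 * tasks caught in the act of exiting.  Roughly:
 *
 *	do_exit()
 *		...
 *		exit_tasks_rcu_start();		// enter SRCU read-side section
 *		...				// remaining exit-path processing
 *		exit_tasks_rcu_finish();	// leave it; Tasks RCU stops waiting
 *		...
 */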

#else /* #ifdef CONFIG_TASKS_RCU */
static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu(). This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching
// of concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This sends IPIs far and wide and induces otherwise unnecessary context
// switches on all online CPUs, whether idle or not.
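
/*
 * For illustration, because the grace period below forces a context switch
 * on every online CPU, any code region that was running with preemption
 * disabled when synchronize_rcu_tasks_rude() was called must have finished
 * by the time it returns.  A hypothetical updater, with both helpers
 * standing in for the caller's own code:
 *
 *	unhook_preempt_disabled_callee();	// no new invocations
 *	synchronize_rcu_tasks_rude();		// old invocations have finished
 *	free_preempt_disabled_callee();
 */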

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution. As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
static void show_rcu_tasks_rude_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU. This variant is designed to be used
// to protect tracing hooks, including those of BPF. This variant
// therefore:
//
// 1. Has explicit read-side markers to allow finite grace periods
//    in the face of in-kernel loops for PREEMPT=n builds.
//
// 2. Protects code in the idle loop, exception entry/exit, and
//    CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3. Avoids expensive read-side instructions, having overhead similar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) // to that of Preemptible RCU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) // There are of course downsides. The grace-period code can send IPIs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) // It is necessary to scan the full tasklist, much as for Tasks RCU. There
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) // is a single callback queue guarded by a single lock, again, much as for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) // Tasks RCU. If needed, these downsides can be at least partially remedied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) // Perhaps most important, this variant of RCU does not affect the vanilla
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) // readers can operate from idle, offline, and exception entry/exit in no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) // way allows rcu_preempt and rcu_sched readers to also do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
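/*
 * Illustrative read-side sketch (hypothetical names, not from the
 * kernel tree): a hook is invoked under rcu_read_lock_trace(), which is
 * what lets an updater use call_rcu_tasks_trace() or
 * synchronize_rcu_tasks_trace() before freeing the hook's data.  A real
 * user would normally pair the dereference with an appropriate
 * rcu_dereference_check() condition rather than rcu_dereference_raw().
 */
struct trc_reader_example {
	int value;
};

static struct trc_reader_example __rcu *trc_reader_example_ptr;

static int __maybe_unused trc_reader_example_read(void)
{
	struct trc_reader_example *p;
	int ret = 0;

	rcu_read_lock_trace();
	/* The Tasks Trace read lock is what protects this dereference. */
	p = rcu_dereference_raw(trc_reader_example_ptr);
	if (p)
		ret = READ_ONCE(p->value);
	rcu_read_unlock_trace();
	return ret;
}
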
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) // The lockdep state must be outside of #ifdef to be useful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static struct lock_class_key rcu_lock_trace_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct lockdep_map rcu_trace_lock_map =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) #ifdef CONFIG_TASKS_TRACE_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // Wait queue for grace-period kthread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) // Record outstanding IPIs to each CPU. No point in sending two...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) // Counters for quiescent-state detection attempts and successes that rely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) // on heavyweight readers executing explicit memory barriers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) static unsigned long n_heavy_reader_attempts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static unsigned long n_heavy_reader_updates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static unsigned long n_heavy_reader_ofl_updates;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) "RCU Tasks Trace");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Deferring the wakeup to this irq_work handler is what allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * rcu_read_unlock_trace() to be invoked while scheduler locks are held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static void rcu_read_unlock_iw(struct irq_work *iwp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) wake_up(&trc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /* If we are the last reader, wake up the grace-period kthread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) int nq = t->trc_reader_special.b.need_qs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) t->trc_reader_special.b.need_mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) smp_mb(); // Pairs with update-side barriers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (nq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) WRITE_ONCE(t->trc_reader_nesting, nesting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) irq_work_queue(&rcu_tasks_trace_iw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
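
/*
 * Rough sketch (simplified, not the actual implementation in
 * include/linux/rcupdate_trace.h) of how the reader-side unlock hands off
 * to rcu_read_unlock_trace_special(): the fast path just decrements the
 * per-task nesting count, and only the outermost unlock that observes the
 * ->trc_reader_special flags set takes the slow path above.
 */
static void __maybe_unused trc_example_unlock_slowpath(void)
{
	struct task_struct *t = current;
	int nesting = READ_ONCE(t->trc_reader_nesting) - 1;

	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting); /* Fast path. */
		return;
	}
	rcu_read_unlock_trace_special(t, nesting); /* Slow path, may wake GP kthread. */
}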
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) /* Add a task to the holdout list, if it is not already on the list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (list_empty(&t->trc_holdout_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) get_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) list_add(&t->trc_holdout_list, bhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* Remove a task from the holdout list, if it is in fact present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) static void trc_del_holdout(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!list_empty(&t->trc_holdout_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) list_del_init(&t->trc_holdout_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) put_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* IPI handler to check task state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static void trc_read_check_handler(void *t_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct task_struct *t = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct task_struct *texp = t_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) // If the task is no longer running on this CPU, leave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (unlikely(texp != t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) wake_up(&trc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto reset_ipi; // Already on holdout list, so will check later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) // If the task is not in a read-side critical section, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) // if this is the last reader, awaken the grace-period kthread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (likely(!t->trc_reader_nesting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) wake_up(&trc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) // Mark as checked after decrement to avoid false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) // positives on the above WARN_ON_ONCE().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) WRITE_ONCE(t->trc_reader_checked, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) goto reset_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) // If we are racing with an rcu_read_unlock_trace(), try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (unlikely(t->trc_reader_nesting < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) wake_up(&trc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto reset_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) WRITE_ONCE(t->trc_reader_checked, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) // Get here if the task is in a read-side critical section. Set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) // its state so that it will awaken the grace-period kthread upon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) // exit from that critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) reset_ipi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) // Allow future IPIs to be sent on CPU and for task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) // Also order this IPI handler against any later manipulations of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) // the intended task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* Callback function for scheduler to check locked-down task. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static bool trc_inspect_reader(struct task_struct *t, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) int cpu = task_cpu(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) bool in_qs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bool ofl = cpu_is_offline(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (task_curr(t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) WARN_ON_ONCE(ofl && !is_idle_task(t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) // If no chance of heavyweight readers, do it the hard way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) // If heavyweight readers are enabled on the remote task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) // we can inspect its state even though it is currently running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) // However, we cannot safely change its state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) n_heavy_reader_attempts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!ofl && // Check for "running" idle tasks on offline CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return false; // No quiescent state, do it the hard way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) n_heavy_reader_updates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (ofl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) n_heavy_reader_ofl_updates++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) in_qs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) in_qs = likely(!t->trc_reader_nesting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) // Mark as checked so that the grace-period kthread will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) // remove it from the holdout list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) t->trc_reader_checked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (in_qs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return true; // Already in quiescent state, done!!!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) // The task is in a read-side critical section, so set up its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) // state so that it will awaken the grace-period kthread upon exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) // from that critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) atomic_inc(&trc_n_readers_need_end); // One more to wait on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Attempt to extract the state for the specified task. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static void trc_wait_for_one_reader(struct task_struct *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct list_head *bhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) // If a previous IPI is still in flight, let it complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) // The current task had better be in a quiescent state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (t == current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) t->trc_reader_checked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) WARN_ON_ONCE(t->trc_reader_nesting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) // Attempt to nail down the task for inspection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) get_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) put_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) put_task_struct(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) // If the task is currently running, send an IPI; either way, add it to the holdout list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) trc_add_holdout(t, bhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (task_curr(t) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) // The task is currently running, so try IPIing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) cpu = task_cpu(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) // If there is already an IPI outstanding, let it happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) atomic_inc(&trc_n_readers_need_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) per_cpu(trc_ipi_to_cpu, cpu) = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) t->trc_ipi_to_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rcu_tasks_trace.n_ipis++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (smp_call_function_single(cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) trc_read_check_handler, t, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) // Just in case the failure has some cause other than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) // the target CPU being offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) rcu_tasks_trace.n_ipis_fails++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) per_cpu(trc_ipi_to_cpu, cpu) = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) t->trc_ipi_to_cpu = -1; // Permit a retry from the holdout-list scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (atomic_dec_and_test(&trc_n_readers_need_end)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) wake_up(&trc_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Initialize for a new RCU-tasks-trace grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static void rcu_tasks_trace_pregp_step(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) // Pre-bias the count so that fast-acting IPIs cannot drive it to zero early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) atomic_set(&trc_n_readers_need_end, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) // There shouldn't be any old IPIs, but...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) // Disable CPU hotplug across the tasklist scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) // This also waits for all readers in CPU-hotplug code paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Do first-round processing for the specified task. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) static void rcu_tasks_trace_pertask(struct task_struct *t,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct list_head *hop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) // During early boot when there is only the one boot CPU, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) // is no idle task for the other CPUs. Just return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (unlikely(t == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) WRITE_ONCE(t->trc_reader_checked, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) t->trc_ipi_to_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) trc_wait_for_one_reader(t, hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * Do intermediate processing between task and holdout scans and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * pick up the idle tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) static void rcu_tasks_trace_postscan(struct list_head *hop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) rcu_tasks_trace_pertask(idle_task(cpu), hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) // Re-enable CPU hotplug now that the tasklist scan has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) // Wait for late-stage exiting tasks to finish exiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) // These might have passed the call to exit_tasks_rcu_finish().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) // Any tasks that exit after this point will set ->trc_reader_checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Show the state of a task stalling the current RCU tasks trace GP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (*firstreport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *firstreport = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) cpu = task_cpu(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) t->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ".I"[READ_ONCE(t->trc_ipi_to_cpu) >= 0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ".i"[is_idle_task(t)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) t->trc_reader_nesting,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) " N"[!!t->trc_reader_special.b.need_qs],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) sched_show_task(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* List stalled IPIs for RCU tasks trace. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static void show_stalled_ipi_trace(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (per_cpu(trc_ipi_to_cpu, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) pr_alert("\tIPI outstanding to CPU %d\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* Do one scan of the holdout list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void check_all_holdout_tasks_trace(struct list_head *hop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) bool needreport, bool *firstreport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct task_struct *g, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) // Disable CPU hotplug across the holdout list scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cpus_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) // If safe and needed, try to check this holdout task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) !READ_ONCE(t->trc_reader_checked))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) trc_wait_for_one_reader(t, hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) // If check succeeded, remove this task from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (READ_ONCE(t->trc_reader_checked))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) trc_del_holdout(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) else if (needreport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) show_stalled_task_trace(t, firstreport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) // Re-enable CPU hotplug now that the holdout list scan has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) cpus_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (needreport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (*firstreport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) show_stalled_ipi_trace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* Wait for grace period to complete and provide ordering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bool firstreport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct task_struct *g, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) LIST_HEAD(holdouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) // Remove the safety count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) smp_mb__before_atomic(); // Order vs. earlier atomics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) atomic_dec(&trc_n_readers_need_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) smp_mb__after_atomic(); // Order vs. later atomics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) // Wait for readers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ret = wait_event_idle_exclusive_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) trc_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) atomic_read(&trc_n_readers_need_end) == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) READ_ONCE(rcu_task_stall_timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) break; // Count reached zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) // Stall warning time, so make a list of the offenders.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) for_each_process_thread(g, t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (READ_ONCE(t->trc_reader_special.b.need_qs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) trc_add_holdout(t, &holdouts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) firstreport = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (READ_ONCE(t->trc_reader_special.b.need_qs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) show_stalled_task_trace(t, &firstreport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) trc_del_holdout(t); // Release task_struct reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (firstreport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) show_stalled_ipi_trace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) smp_mb(); // Caller's code must be ordered after wakeup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) // Pairs with pretty much every ordering primitive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* Report any needed quiescent state for this exiting task. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static void exit_tasks_rcu_finish_trace(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) WRITE_ONCE(t->trc_reader_checked, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) WARN_ON_ONCE(t->trc_reader_nesting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) WRITE_ONCE(t->trc_reader_nesting, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) rcu_read_unlock_trace_special(t, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * @rhp: structure to be used for queueing the RCU updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * @func: actual callback function to be invoked after the grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * The callback function will be invoked some time after a full grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * period elapses, in other words after all currently executing RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * read-side critical sections have completed. These read-side critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * sections are delimited by calls to rcu_read_lock_trace() and calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * rcu_read_unlock_trace().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * See the description of call_rcu() for more detailed information on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * memory ordering guarantees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
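
/*
 * Illustrative update-side sketch (hypothetical names, not from the
 * kernel tree): an old hook descriptor is retired via
 * call_rcu_tasks_trace(), so it is freed only after every task that
 * might still be running it under rcu_read_lock_trace() has finished.
 */
struct trc_cb_example {
	struct rcu_head rh;
	void (*func)(void);
};

static struct trc_cb_example __rcu *trc_cb_example_hook;

static void trc_cb_example_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct trc_cb_example, rh));
}

static void __maybe_unused trc_cb_example_replace(struct trc_cb_example *newh)
{
	struct trc_cb_example *oldh;

	/* Publish the new hook and retire the old one asynchronously. */
	oldh = rcu_replace_pointer(trc_cb_example_hook, newh, true);
	if (oldh)
		call_rcu_tasks_trace(&oldh->rh, trc_cb_example_free);
}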
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * Control will return to the caller some time after a trace rcu-tasks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * grace period has elapsed, in other words after all currently executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * rcu-tasks read-side critical sections have completed. These read-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * critical sections are delimited by calls to rcu_read_lock_trace()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * and rcu_read_unlock_trace().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * This is a very specialized primitive, intended only for a few uses in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * tracing and other situations requiring manipulation of function preambles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * (yet) intended for heavy use from multiple CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * See the description of synchronize_rcu() for more detailed information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * on memory ordering guarantees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) void synchronize_rcu_tasks_trace(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) synchronize_rcu_tasks_generic(&rcu_tasks_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
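
/*
 * Illustrative synchronous sketch (hypothetical names, continuing the
 * trc_cb_example structure from the sketch above): unpublish the object,
 * wait for a full Tasks Trace grace period, then free it directly.
 */
static void __maybe_unused trc_cb_example_remove_sync(void)
{
	struct trc_cb_example *oldh;

	oldh = rcu_replace_pointer(trc_cb_example_hook, NULL, true);
	if (!oldh)
		return;
	/* All rcu_read_lock_trace() readers of @oldh are done after this. */
	synchronize_rcu_tasks_trace();
	kfree(oldh);
}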
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Although the current implementation is guaranteed to wait, it is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * obligated to do so, for example, when there are no pending callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void rcu_barrier_tasks_trace(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* There is only one callback queue, so this is easy. ;-) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) synchronize_rcu_tasks_trace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int __init rcu_spawn_tasks_trace_kthread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) rcu_tasks_trace.gp_sleep = HZ / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) rcu_tasks_trace.init_fract = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) rcu_tasks_trace.gp_sleep = HZ / 200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (rcu_tasks_trace.gp_sleep <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rcu_tasks_trace.gp_sleep = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) rcu_tasks_trace.init_fract = HZ / 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (rcu_tasks_trace.init_fract <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) rcu_tasks_trace.init_fract = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
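
/*
 * The assignments above wire the Tasks Trace flavor into the generic
 * grace-period machinery, which roughly proceeds as follows:
 * rcu_tasks_trace_pregp_step() sets up the reader count and blocks CPU
 * hotplug, rcu_tasks_trace_pertask() is applied to each task on the
 * tasklist, rcu_tasks_trace_postscan() picks up the idle tasks and waits
 * for late exiters, check_all_holdout_tasks_trace() is invoked repeatedly
 * until the holdout list drains, and rcu_tasks_trace_postgp() waits out
 * any remaining readers and provides the needed memory ordering.
 */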
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) #ifndef CONFIG_TINY_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void show_rcu_tasks_trace_gp_kthread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) data_race(n_heavy_reader_ofl_updates),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) data_race(n_heavy_reader_updates),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) data_race(n_heavy_reader_attempts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #endif /* #ifndef CONFIG_TINY_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static inline void show_rcu_tasks_trace_gp_kthread(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) #ifndef CONFIG_TINY_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) void show_rcu_tasks_gp_kthreads(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) show_rcu_tasks_classic_gp_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) show_rcu_tasks_rude_gp_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) show_rcu_tasks_trace_gp_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) #endif /* #ifndef CONFIG_TINY_RCU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) void __init rcu_init_tasks_generic(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) #ifdef CONFIG_TASKS_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rcu_spawn_tasks_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #ifdef CONFIG_TASKS_RUDE_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rcu_spawn_tasks_rude_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #ifdef CONFIG_TASKS_TRACE_RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) rcu_spawn_tasks_trace_kthread();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static inline void rcu_tasks_bootup_oddness(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) void show_rcu_tasks_gp_kthreads(void) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */