| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| #include "../locking/rtmutex_common.h" |
| |
| #ifdef CONFIG_RCU_NOCB_CPU |
| static cpumask_var_t rcu_nocb_mask; |
| static bool __read_mostly rcu_nocb_poll; |
| #endif |
| |
| |
| |
| |
| |
| static void __init rcu_bootup_announce_oddness(void) |
| { |
| <------>if (IS_ENABLED(CONFIG_RCU_TRACE)) |
| <------><------>pr_info("\tRCU event tracing is enabled.\n"); |
| <------>if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) || |
| <------> (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32)) |
| <------><------>pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n", |
| <------><------><------>RCU_FANOUT); |
| <------>if (rcu_fanout_exact) |
| <------><------>pr_info("\tHierarchical RCU autobalancing is disabled.\n"); |
| <------>if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) |
| <------><------>pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); |
| <------>if (IS_ENABLED(CONFIG_PROVE_RCU)) |
| <------><------>pr_info("\tRCU lockdep checking is enabled.\n"); |
| <------>if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) |
| <------><------>pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n"); |
| <------>if (RCU_NUM_LVLS >= 4) |
| <------><------>pr_info("\tFour(or more)-level hierarchy is enabled.\n"); |
| <------>if (RCU_FANOUT_LEAF != 16) |
| <------><------>pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", |
| <------><------><------>RCU_FANOUT_LEAF); |
| <------>if (rcu_fanout_leaf != RCU_FANOUT_LEAF) |
| <------><------>pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", |
| <------><------><------>rcu_fanout_leaf); |
| <------>if (nr_cpu_ids != NR_CPUS) |
| <------><------>pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids); |
| #ifdef CONFIG_RCU_BOOST |
| <------>pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", |
| <------><------>kthread_prio, CONFIG_RCU_BOOST_DELAY); |
| #endif |
| <------>if (blimit != DEFAULT_RCU_BLIMIT) |
| <------><------>pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); |
| <------>if (qhimark != DEFAULT_RCU_QHIMARK) |
| <------><------>pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark); |
| <------>if (qlowmark != DEFAULT_RCU_QLOMARK) |
| <------><------>pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark); |
| <------>if (qovld != DEFAULT_RCU_QOVLD) |
| <------><------>pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld); |
| <------>if (jiffies_till_first_fqs != ULONG_MAX) |
| <------><------>pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs); |
| <------>if (jiffies_till_next_fqs != ULONG_MAX) |
| <------><------>pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs); |
| <------>if (jiffies_till_sched_qs != ULONG_MAX) |
| <------><------>pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs); |
| <------>if (rcu_kick_kthreads) |
| <------><------>pr_info("\tKick kthreads if too-long grace period.\n"); |
| <------>if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD)) |
| <------><------>pr_info("\tRCU callback double-/use-after-free debug enabled.\n"); |
| <------>if (gp_preinit_delay) |
| <------><------>pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); |
| <------>if (gp_init_delay) |
| <------><------>pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay); |
| <------>if (gp_cleanup_delay) |
| <------><------>pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_cleanup_delay); |
| <------>if (!use_softirq) |
| <------><------>pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n"); |
| <------>if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG)) |
| <------><------>pr_info("\tRCU debug extended QS entry/exit.\n"); |
| <------>rcupdate_announce_bootup_oddness(); |
| } |
| |
| #ifdef CONFIG_PREEMPT_RCU |
| |
| static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake); |
| static void rcu_read_unlock_special(struct task_struct *t); |
| |
| |
| |
| |
| static void __init rcu_bootup_announce(void) |
| { |
| <------>pr_info("Preemptible hierarchical RCU implementation.\n"); |
| <------>rcu_bootup_announce_oddness(); |
| } |
| |
| |
| #define RCU_GP_TASKS 0x8 |
| #define RCU_EXP_TASKS 0x4 |
| #define RCU_GP_BLKD 0x2 |
| #define RCU_EXP_BLKD 0x1 |
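| |
| /* |
|  * These four bits combine into the blkd_state value computed at the top of |
|  * rcu_preempt_ctxt_queue() below.  The *_TASKS bits say whether some task is |
|  * already queued blocking the normal or expedited grace period on this leaf |
|  * rcu_node, and the *_BLKD bits say whether this CPU has yet to report a |
|  * quiescent state to that grace period, so the newly preempted task will |
|  * block it.  For example, blkd_state == RCU_GP_TASKS + RCU_EXP_BLKD means |
|  * other tasks already block the normal GP and the new task is the first to |
|  * block the expedited GP. |
|  */ |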
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) |
| <------>__releases(rnp->lock) |
| { |
| <------>int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + |
| <------><------><------> (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + |
| <------><------><------> (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + |
| <------><------><------> (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); |
| <------>struct task_struct *t = current; |
| |
| <------>raw_lockdep_assert_held_rcu_node(rnp); |
| <------>WARN_ON_ONCE(rdp->mynode != rnp); |
| <------>WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); |
| <------>/* RCU should not be waiting on CPUs that have only just come online. */ |
| <------>WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & |
| <------><------> rdp->grpmask); |
| |
| <------>/* |
| <------> * Decide where to queue the newly blocked task. In theory, |
| <------> * this could be an if-statement. In practice, when I tried |
| <------> * that, it was quite messy. |
| <------> */ |
| <------>switch (blkd_state) { |
| <------>case 0: |
| <------>case RCU_EXP_TASKS: |
| <------>case RCU_EXP_TASKS + RCU_GP_BLKD: |
| <------>case RCU_GP_TASKS: |
| <------>case RCU_GP_TASKS + RCU_EXP_TASKS: |
| |
| <------><------>/* |
| <------><------> * Blocking neither GP, or first task blocking the normal |
| <------><------> * GP but not blocking the already-waiting expedited GP. |
| <------><------> * Queue at the head of the list to avoid unnecessarily |
| <------><------> * blocking the already-waiting GPs. |
| <------><------> */ |
| <------><------>list_add(&t->rcu_node_entry, &rnp->blkd_tasks); |
| <------><------>break; |
| |
| <------>case RCU_EXP_BLKD: |
| <------>case RCU_GP_BLKD: |
| <------>case RCU_GP_BLKD + RCU_EXP_BLKD: |
| <------>case RCU_GP_TASKS + RCU_EXP_BLKD: |
| <------>case RCU_GP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: |
| <------>case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: |
| |
| <------><------>/* |
| <------><------> * First task arriving that blocks either GP, or first task |
| <------><------> * arriving that blocks the expedited GP (with the normal |
| <------><------> * GP already waiting), or a task arriving that blocks |
| <------><------> * both GPs with both GPs already waiting. Queue at the |
| <------><------> * tail of the list to avoid any GP waiting on any of the |
| <------><------> * already queued tasks that are not blocking it. |
| <------><------> */ |
| <------><------>list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); |
| <------><------>break; |
| |
| <------>case RCU_EXP_TASKS + RCU_EXP_BLKD: |
| <------>case RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD: |
| <------>case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_EXP_BLKD: |
| |
| <------><------>/* |
| <------><------> * Second or subsequent task blocking the expedited GP. |
| <------><------> * The task either does not block the normal GP, or is the |
| <------><------> * first task blocking the normal GP. Queue just after |
| <------><------> * the first task blocking the expedited GP. |
| <------><------> */ |
| <------><------>list_add(&t->rcu_node_entry, rnp->exp_tasks); |
| <------><------>break; |
| |
| <------>case RCU_GP_TASKS + RCU_GP_BLKD: |
| <------>case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD: |
| |
| <------><------>/* |
| <------><------> * Second or subsequent task blocking the normal GP. |
| <------><------> * The task does not block the expedited GP. Queue just |
| <------><------> * after the first task blocking the normal GP. |
| <------><------> */ |
| <------><------>list_add(&t->rcu_node_entry, rnp->gp_tasks); |
| <------><------>break; |
| |
| <------>default: |
| |
| <------><------>/* Unreachable: all valid blkd_state combinations are handled above. */ |
| <------><------>WARN_ON_ONCE(1); |
| <------><------>break; |
| <------>} |
| |
| <------>/* |
| <------> * We have now queued the task. If it was the first one to |
| <------> * block either grace period, update the ->gp_tasks and/or |
| <------> * ->exp_tasks pointers, respectively, to reference the newly |
| <------> * blocked tasks. |
| <------> */ |
| <------>if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { |
| <------><------>WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); |
| <------><------>WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); |
| <------>} |
| <------>if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) |
| <------><------>WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); |
| <------>WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) != |
| <------><------> !(rnp->qsmask & rdp->grpmask)); |
| <------>WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) != |
| <------><------> !(rnp->expmask & rdp->grpmask)); |
| <------>raw_spin_unlock_rcu_node(rnp); |
| |
| <------>/* |
| <------> * Report the quiescent state for the expedited GP. This expedited |
| <------> * GP should not be able to end until we report, so there should be |
| <------> * no need to check for a subsequent expedited GP. (Though we are |
| <------> * still in a quiescent state in any case.) |
| <------> */ |
| <------>if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs) |
| <------><------>rcu_report_exp_rdp(rdp); |
| <------>else |
| <------><------>WARN_ON_ONCE(rdp->exp_deferred_qs); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
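| /* |
|  * Record a quiescent state for preemptible RCU on the current CPU and |
|  * clear the current task's ->rcu_read_unlock_special.b.need_qs flag. |
|  * Must be invoked with preemption disabled. |
|  */ |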
| static void rcu_qs(void) |
| { |
| <------>RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n"); |
| <------>if (__this_cpu_read(rcu_data.cpu_no_qs.s)) { |
| <------><------>trace_rcu_grace_period(TPS("rcu_preempt"), |
| <------><------><------><------> __this_cpu_read(rcu_data.gp_seq), |
| <------><------><------><------> TPS("cpuqs")); |
| <------><------>__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); |
| <------><------>barrier(); |
| <------><------>WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
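| /* |
|  * Note a preemptible-RCU context switch.  If the current task is within |
|  * an RCU read-side critical section and has not yet been queued, queue it |
|  * on its leaf rcu_node's ->blkd_tasks list; otherwise report any deferred |
|  * quiescent states.  Either way, the CPU itself is then in a quiescent |
|  * state.  Invoked with interrupts disabled. |
|  */ |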
| void rcu_note_context_switch(bool preempt) |
| { |
| <------>struct task_struct *t = current; |
| <------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| <------>struct rcu_node *rnp; |
| |
| <------>trace_rcu_utilization(TPS("Start context switch")); |
| <------>lockdep_assert_irqs_disabled(); |
| <------>WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0); |
| <------>if (rcu_preempt_depth() > 0 && |
| <------> !t->rcu_read_unlock_special.b.blocked) { |
| |
| <------><------>/* Possibly blocking in an RCU read-side critical section. */ |
| <------><------>rnp = rdp->mynode; |
| <------><------>raw_spin_lock_rcu_node(rnp); |
| <------><------>t->rcu_read_unlock_special.b.blocked = true; |
| <------><------>t->rcu_blocked_node = rnp; |
| |
| <------><------>/* |
| <------><------> * Verify the CPU's sanity, trace the preemption, and |
| <------><------> * then queue the task as required based on the states |
| <------><------> * of any ongoing and expedited grace periods. |
| <------><------> */ |
| <------><------>WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); |
| <------><------>WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); |
| <------><------>trace_rcu_preempt_task(rcu_state.name, |
| <------><------><------><------> t->pid, |
| <------><------><------><------> (rnp->qsmask & rdp->grpmask) |
| <------><------><------><------> ? rnp->gp_seq |
| <------><------><------><------> : rcu_seq_snap(&rnp->gp_seq)); |
| <------><------>rcu_preempt_ctxt_queue(rnp, rdp); |
| <------>} else { |
| <------><------>rcu_preempt_deferred_qs(t); |
| <------>} |
| |
| <------>/* |
| <------> * Either we were not in an RCU read-side critical section to |
| <------> * begin with, or we have now recorded that critical section |
| <------> * globally. Either way, we can now note a quiescent state |
| <------> * for this CPU. Again, if we were in an RCU read-side critical |
| <------> * section, and if that critical section was blocking the current |
| <------> * grace period, then the fact that the task has been enqueued |
| <------> * means that we continue to block the current grace period. |
| <------> */ |
| <------>rcu_qs(); |
| <------>if (rdp->exp_deferred_qs) |
| <------><------>rcu_report_exp_rdp(rdp); |
| <------>rcu_tasks_qs(current, preempt); |
| <------>trace_rcu_utilization(TPS("End context switch")); |
| } |
| EXPORT_SYMBOL_GPL(rcu_note_context_switch); |
| |
| |
| |
| |
| |
| |
| static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
| { |
| <------>return READ_ONCE(rnp->gp_tasks) != NULL; |
| } |
| |
| |
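| /* Sanity limit on ->rcu_read_lock_nesting, checked under PROVE_LOCKING. */ |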
| #define RCU_NEST_PMAX (INT_MAX / 2) |
| |
| static void rcu_preempt_read_enter(void) |
| { |
| <------>current->rcu_read_lock_nesting++; |
| } |
| |
| static int rcu_preempt_read_exit(void) |
| { |
| <------>return --current->rcu_read_lock_nesting; |
| } |
| |
| static void rcu_preempt_depth_set(int val) |
| { |
| <------>current->rcu_read_lock_nesting = val; |
| } |
| |
| |
| |
| |
| |
| |
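| /* |
|  * Preemptible-RCU implementation of rcu_read_lock().  Just increment the |
|  * per-task nesting counter; the trailing barrier() keeps the compiler from |
|  * moving the critical section's accesses ahead of the increment. |
|  */ |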
| void __rcu_read_lock(void) |
| { |
| <------>rcu_preempt_read_enter(); |
| <------>if (IS_ENABLED(CONFIG_PROVE_LOCKING)) |
| <------><------>WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX); |
| <------>if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread) |
| <------><------>WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true); |
| <------>barrier(); |
| } |
| EXPORT_SYMBOL_GPL(__rcu_read_lock); |
| |
| |
| |
| |
| |
| |
| |
| |
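| /* |
|  * Preemptible-RCU implementation of rcu_read_unlock().  Decrement the |
|  * nesting counter, and if this is the outermost rcu_read_unlock() and |
|  * something special happened during the critical section (preemption, a |
|  * needed quiescent state, ...), hand off to rcu_read_unlock_special(). |
|  */ |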
| void __rcu_read_unlock(void) |
| { |
| <------>struct task_struct *t = current; |
| |
| <------>if (rcu_preempt_read_exit() == 0) { |
| <------><------>barrier(); |
| <------><------>if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) |
| <------><------><------>rcu_read_unlock_special(t); |
| <------>} |
| <------>if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { |
| <------><------>int rrln = rcu_preempt_depth(); |
| |
| <------><------>WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX); |
| <------>} |
| } |
| EXPORT_SYMBOL_GPL(__rcu_read_unlock); |
| |
| |
| |
| |
| |
| static struct list_head *rcu_next_node_entry(struct task_struct *t, |
| <------><------><------><------><------> struct rcu_node *rnp) |
| { |
| <------>struct list_head *np; |
| |
| <------>np = t->rcu_node_entry.next; |
| <------>if (np == &rnp->blkd_tasks) |
| <------><------>np = NULL; |
| <------>return np; |
| } |
| |
| |
| |
| |
| |
| static bool rcu_preempt_has_tasks(struct rcu_node *rnp) |
| { |
| <------>return !list_empty(&rnp->blkd_tasks); |
| } |
| |
| |
| |
| |
| |
| |
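| /* |
|  * Report any deferred quiescent states owed by the current task or CPU, |
|  * remove the task from its rcu_node ->blkd_tasks list if it had blocked |
|  * within a critical section, and drop any priority boost it received. |
|  * Invoked with interrupts disabled; restores them from "flags". |
|  */ |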
| static void |
| rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) |
| { |
| <------>bool empty_exp; |
| <------>bool empty_norm; |
| <------>bool empty_exp_now; |
| <------>struct list_head *np; |
| <------>bool drop_boost_mutex = false; |
| <------>struct rcu_data *rdp; |
| <------>struct rcu_node *rnp; |
| <------>union rcu_special special; |
| |
| <------>/* |
| <------> * If RCU core is waiting for this CPU to exit its critical section, |
| <------> * report the fact that it has exited. Because irqs are disabled, |
| <------> * t->rcu_read_unlock_special cannot change. |
| <------> */ |
| <------>special = t->rcu_read_unlock_special; |
| <------>rdp = this_cpu_ptr(&rcu_data); |
| <------>if (!special.s && !rdp->exp_deferred_qs) { |
| <------><------>local_irq_restore(flags); |
| <------><------>return; |
| <------>} |
| <------>t->rcu_read_unlock_special.s = 0; |
| <------>if (special.b.need_qs) { |
| <------><------>if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) { |
| <------><------><------>rcu_report_qs_rdp(rdp); |
| <------><------><------>udelay(rcu_unlock_delay); |
| <------><------>} else { |
| <------><------><------>rcu_qs(); |
| <------><------>} |
| <------>} |
| |
| <------>/* |
| <------> * Respond to a request by an expedited grace period for a |
| <------> * quiescent state from this CPU. Note that requests from |
| <------> * tasks are handled when removing the task from the |
| <------> * blocked-tasks list below. |
| <------> */ |
| <------>if (rdp->exp_deferred_qs) |
| <------><------>rcu_report_exp_rdp(rdp); |
| |
| <------>/* Clean up if blocked during RCU read-side critical section. */ |
| <------>if (special.b.blocked) { |
| |
| <------><------>/* |
| <------><------> * Remove this task from the list it blocked on. The task |
| <------><------> * now remains queued on the rcu_node corresponding to the |
| <------><------> * CPU it first blocked on, so there is no longer any need |
| <------><------> * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia. |
| <------><------> */ |
| <------><------>rnp = t->rcu_blocked_node; |
| <------><------>raw_spin_lock_rcu_node(rnp); |
| <------><------>WARN_ON_ONCE(rnp != t->rcu_blocked_node); |
| <------><------>WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); |
| <------><------>empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); |
| <------><------>WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && |
| <------><------><------> (!empty_norm || rnp->qsmask)); |
| <------><------>empty_exp = sync_rcu_exp_done(rnp); |
| <------><------>smp_mb(); /* Ensure expedited fastpath sees end of critical section. */ |
| <------><------>np = rcu_next_node_entry(t, rnp); |
| <------><------>list_del_init(&t->rcu_node_entry); |
| <------><------>t->rcu_blocked_node = NULL; |
| <------><------>trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), |
| <------><------><------><------><------><------>rnp->gp_seq, t->pid); |
| <------><------>if (&t->rcu_node_entry == rnp->gp_tasks) |
| <------><------><------>WRITE_ONCE(rnp->gp_tasks, np); |
| <------><------>if (&t->rcu_node_entry == rnp->exp_tasks) |
| <------><------><------>WRITE_ONCE(rnp->exp_tasks, np); |
| <------><------>if (IS_ENABLED(CONFIG_RCU_BOOST)) { |
| <------><------><------>/* Snapshot ->boost_mtx ownership while holding rnp->lock. */ |
| <------><------><------>drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; |
| <------><------><------>if (&t->rcu_node_entry == rnp->boost_tasks) |
| <------><------><------><------>WRITE_ONCE(rnp->boost_tasks, np); |
| <------><------>} |
| |
| <------><------>/* |
| <------><------> * If this was the last task on the current list, and if |
| <------><------> * we aren't waiting on any CPUs, report the quiescent state. |
| <------><------> * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, |
| <------><------> * so we must take a snapshot of the expedited state. |
| <------><------> */ |
| <------><------>empty_exp_now = sync_rcu_exp_done(rnp); |
| <------><------>if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { |
| <------><------><------>trace_rcu_quiescent_state_report(TPS("preempt_rcu"), |
| <------><------><------><------><------><------><------> rnp->gp_seq, |
| <------><------><------><------><------><------><------> 0, rnp->qsmask, |
| <------><------><------><------><------><------><------> rnp->level, |
| <------><------><------><------><------><------><------> rnp->grplo, |
| <------><------><------><------><------><------><------> rnp->grphi, |
| <------><------><------><------><------><------><------> !!rnp->gp_tasks); |
| <------><------><------>rcu_report_unblock_qs_rnp(rnp, flags); |
| <------><------>} else { |
| <------><------><------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------><------>} |
| |
| <------><------>/* |
| <------><------> * If this was the last task on the expedited lists, |
| <------><------> * then we need to report up the rcu_node hierarchy. |
| <------><------> */ |
| <------><------>if (!empty_exp && empty_exp_now) |
| <------><------><------>rcu_report_exp_rnp(rnp, true); |
| |
| <------><------>/* Unboost if we were boosted. */ |
| <------><------>if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) |
| <------><------><------>rt_mutex_futex_unlock(&rnp->boost_mtx); |
| |
| <------>} else { |
| <------><------>local_irq_restore(flags); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static bool rcu_preempt_need_deferred_qs(struct task_struct *t) |
| { |
| <------>return (__this_cpu_read(rcu_data.exp_deferred_qs) || |
| <------><------>READ_ONCE(t->rcu_read_unlock_special.s)) && |
| <------> rcu_preempt_depth() == 0; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_preempt_deferred_qs(struct task_struct *t) |
| { |
| <------>unsigned long flags; |
| |
| <------>if (!rcu_preempt_need_deferred_qs(t)) |
| <------><------>return; |
| <------>local_irq_save(flags); |
| <------>rcu_preempt_deferred_qs_irqrestore(t, flags); |
| } |
| |
| |
| |
| |
| static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp) |
| { |
| <------>struct rcu_data *rdp; |
| |
| <------>rdp = container_of(iwp, struct rcu_data, defer_qs_iw); |
| <------>rdp->defer_qs_iw_pending = false; |
| } |
| |
| |
| |
| |
| |
| |
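| /* |
|  * Handle special conditions at the end of the outermost RCU read-side |
|  * critical section: report deferred quiescent states now if it is safe, |
|  * or else arrange for them to be reported later via softirq, a resched, |
|  * or irq_work, depending on context. |
|  */ |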
| static void rcu_read_unlock_special(struct task_struct *t) |
| { |
| <------>unsigned long flags; |
| <------>bool preempt_bh_were_disabled = |
| <------><------><------>!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)); |
| <------>bool irqs_were_disabled; |
| |
| <------>/* NMI handlers cannot block and cannot safely manipulate state. */ |
| <------>if (in_nmi()) |
| <------><------>return; |
| |
| <------>local_irq_save(flags); |
| <------>irqs_were_disabled = irqs_disabled_flags(flags); |
| <------>if (preempt_bh_were_disabled || irqs_were_disabled) { |
| <------><------>bool exp; |
| <------><------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| <------><------>struct rcu_node *rnp = rdp->mynode; |
| |
| <------><------>exp = (t->rcu_blocked_node && |
| <------><------> READ_ONCE(t->rcu_blocked_node->exp_tasks)) || |
| <------><------> (rdp->grpmask & READ_ONCE(rnp->expmask)); |
| <------><------>/* Defer the quiescent-state report until it is safe. */ |
| <------><------>if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) { |
| <------><------><------>/* Safe to raise softirq: in_irq(), or an expedited |
| <------><------><------> * GP is waiting and interrupts are enabled. */ |
| <------><------><------>raise_softirq_irqoff(RCU_SOFTIRQ); |
| <------><------>} else { |
| <------><------><------>/* Enabling BH or preemption does reschedule; with no |
| <------><------><------> * expedited GP waiting, slower reporting is OK, and |
| <------><------><------> * nohz_full CPUs eventually get their tick back. */ |
| <------><------><------>set_tsk_need_resched(current); |
| <------><------><------>set_preempt_need_resched(); |
| <------><------><------>if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled && |
| <------><------><------> !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) { |
| <------><------><------><------>/* Queue irq_work so the scheduler re-evaluates soon; |
| <------><------><------><------> * without IRQ_WORK, the FQS scan will eventually IPI. */ |
| <------><------><------><------>init_irq_work(&rdp->defer_qs_iw, |
| <------><------><------><------><------> rcu_preempt_deferred_qs_handler); |
| <------><------><------><------>rdp->defer_qs_iw_pending = true; |
| <------><------><------><------>irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); |
| <------><------><------>} |
| <------><------>} |
| <------><------>local_irq_restore(flags); |
| <------><------>return; |
| <------>} |
| <------>rcu_preempt_deferred_qs_irqrestore(t, flags); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
| { |
| <------>struct task_struct *t; |
| |
| <------>RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n"); |
| <------>raw_lockdep_assert_held_rcu_node(rnp); |
| <------>if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp))) |
| <------><------>dump_blkd_tasks(rnp, 10); |
| <------>if (rcu_preempt_has_tasks(rnp) && |
| <------> (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { |
| <------><------>WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); |
| <------><------>t = container_of(rnp->gp_tasks, struct task_struct, |
| <------><------><------><------> rcu_node_entry); |
| <------><------>trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), |
| <------><------><------><------><------><------>rnp->gp_seq, t->pid); |
| <------>} |
| <------>WARN_ON_ONCE(rnp->qsmask); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_flavor_sched_clock_irq(int user) |
| { |
| <------>struct task_struct *t = current; |
| |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (user || rcu_is_cpu_rrupt_from_idle()) { |
| <------><------>rcu_note_voluntary_context_switch(current); |
| <------>} |
| <------>if (rcu_preempt_depth() > 0 || |
| <------> (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) { |
| <------><------>/* No QS yet; if one is deferred, force a context switch. */ |
| <------><------>if (rcu_preempt_need_deferred_qs(t)) { |
| <------><------><------>set_tsk_need_resched(t); |
| <------><------><------>set_preempt_need_resched(); |
| <------><------>} |
| <------>} else if (rcu_preempt_need_deferred_qs(t)) { |
| <------><------>rcu_preempt_deferred_qs(t); |
| <------><------>return; |
| <------>} else if (!WARN_ON_ONCE(rcu_preempt_depth())) { |
| <------><------>rcu_qs(); |
| <------><------>return; |
| <------>} |
| |
| <------>/* If the grace period is old, ask this task to report a QS at unlock. */ |
| <------>if (rcu_preempt_depth() > 0 && |
| <------> __this_cpu_read(rcu_data.core_needs_qs) && |
| <------> __this_cpu_read(rcu_data.cpu_no_qs.b.norm) && |
| <------> !t->rcu_read_unlock_special.b.need_qs && |
| <------> time_after(jiffies, rcu_state.gp_start + HZ)) |
| <------><------>t->rcu_read_unlock_special.b.need_qs = true; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| void exit_rcu(void) |
| { |
| <------>struct task_struct *t = current; |
| |
| <------>if (unlikely(!list_empty(¤t->rcu_node_entry))) { |
| <------><------>rcu_preempt_depth_set(1); |
| <------><------>barrier(); |
| <------><------>WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); |
| <------>} else if (unlikely(rcu_preempt_depth())) { |
| <------><------>rcu_preempt_depth_set(1); |
| <------>} else { |
| <------><------>return; |
| <------>} |
| <------>__rcu_read_unlock(); |
| <------>rcu_preempt_deferred_qs(current); |
| } |
| |
| |
| |
| |
| |
| static void |
| dump_blkd_tasks(struct rcu_node *rnp, int ncheck) |
| { |
| <------>int cpu; |
| <------>int i; |
| <------>struct list_head *lhp; |
| <------>bool onl; |
| <------>struct rcu_data *rdp; |
| <------>struct rcu_node *rnp1; |
| |
| <------>raw_lockdep_assert_held_rcu_node(rnp); |
| <------>pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", |
| <------><------>__func__, rnp->grplo, rnp->grphi, rnp->level, |
| <------><------>(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); |
| <------>for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) |
| <------><------>pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", |
| <------><------><------>__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); |
| <------>pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", |
| <------><------>__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), |
| <------><------>READ_ONCE(rnp->exp_tasks)); |
| <------>pr_info("%s: ->blkd_tasks", __func__); |
| <------>i = 0; |
| <------>list_for_each(lhp, &rnp->blkd_tasks) { |
| <------><------>pr_cont(" %p", lhp); |
| <------><------>if (++i >= ncheck) |
| <------><------><------>break; |
| <------>} |
| <------>pr_cont("\n"); |
| <------>for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { |
| <------><------>rdp = per_cpu_ptr(&rcu_data, cpu); |
| <------><------>onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); |
| <------><------>pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n", |
| <------><------><------>cpu, ".o"[onl], |
| <------><------><------>(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, |
| <------><------><------>(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); |
| <------>} |
| } |
| |
| #else /* #ifdef CONFIG_PREEMPT_RCU */ |
| |
| |
| |
| |
| |
| |
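| /* |
|  * Under CONFIG_RCU_STRICT_GRACE_PERIOD, report a quiescent state as soon |
|  * as rcu_read_unlock() is reached, provided the calling context allows it. |
|  */ |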
| void rcu_read_unlock_strict(void) |
| { |
| <------>struct rcu_data *rdp; |
| |
| <------>if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) || |
| <------> irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) |
| <------><------>return; |
| <------>rdp = this_cpu_ptr(&rcu_data); |
| <------>rcu_report_qs_rdp(rdp); |
| <------>udelay(rcu_unlock_delay); |
| } |
| EXPORT_SYMBOL_GPL(rcu_read_unlock_strict); |
| |
| |
| |
| |
| static void __init rcu_bootup_announce(void) |
| { |
| <------>pr_info("Hierarchical RCU implementation.\n"); |
| <------>rcu_bootup_announce_oddness(); |
| } |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_qs(void) |
| { |
| <------>RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!"); |
| <------>if (!__this_cpu_read(rcu_data.cpu_no_qs.s)) |
| <------><------>return; |
| <------>trace_rcu_grace_period(TPS("rcu_sched"), |
| <------><------><------> __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs")); |
| <------>__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); |
| <------>if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) |
| <------><------>return; |
| <------>__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false); |
| <------>rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| void rcu_all_qs(void) |
| { |
| <------>unsigned long flags; |
| |
| <------>if (!raw_cpu_read(rcu_data.rcu_urgent_qs)) |
| <------><------>return; |
| <------>preempt_disable(); |
| <------>/* Load rcu_urgent_qs before other flags (acquire ordering). */ |
| <------>if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { |
| <------><------>preempt_enable(); |
| <------><------>return; |
| <------>} |
| <------>this_cpu_write(rcu_data.rcu_urgent_qs, false); |
| <------>if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) { |
| <------><------>local_irq_save(flags); |
| <------><------>rcu_momentary_dyntick_idle(); |
| <------><------>local_irq_restore(flags); |
| <------>} |
| <------>rcu_qs(); |
| <------>preempt_enable(); |
| } |
| EXPORT_SYMBOL_GPL(rcu_all_qs); |
| |
| |
| |
| |
| void rcu_note_context_switch(bool preempt) |
| { |
| <------>trace_rcu_utilization(TPS("Start context switch")); |
| <------>rcu_qs(); |
| <------>/* Load rcu_urgent_qs before other flags (acquire ordering). */ |
| <------>if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) |
| <------><------>goto out; |
| <------>this_cpu_write(rcu_data.rcu_urgent_qs, false); |
| <------>if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) |
| <------><------>rcu_momentary_dyntick_idle(); |
| <------>rcu_tasks_qs(current, preempt); |
| out: |
| <------>trace_rcu_utilization(TPS("End context switch")); |
| } |
| EXPORT_SYMBOL_GPL(rcu_note_context_switch); |
| |
| |
| |
| |
| |
| static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) |
| { |
| <------>return 0; |
| } |
| |
| |
| |
| |
| static bool rcu_preempt_has_tasks(struct rcu_node *rnp) |
| { |
| <------>return false; |
| } |
| |
| |
| |
| |
| |
| static bool rcu_preempt_need_deferred_qs(struct task_struct *t) |
| { |
| <------>return false; |
| } |
| static void rcu_preempt_deferred_qs(struct task_struct *t) { } |
| |
| |
| |
| |
| |
| |
| static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) |
| { |
| <------>WARN_ON_ONCE(rnp->qsmask); |
| } |
| |
| |
| |
| |
| |
| static void rcu_flavor_sched_clock_irq(int user) |
| { |
| <------>if (user || rcu_is_cpu_rrupt_from_idle()) { |
| |
| <------><------>/* |
| <------><------> * Get here if this CPU took its interrupt from user |
| <------><------> * mode or from the idle loop, and if this is not a |
| <------><------> * nested interrupt. In this case, the CPU is in |
| <------><------> * a quiescent state, so note it. |
| <------><------> * |
| <------><------> * No memory barrier is required here because rcu_qs() |
| <------><------> * references only CPU-local variables that other CPUs |
| <------><------> * neither access nor modify, at least not while the |
| <------><------> * corresponding CPU is online. |
| <------><------> */ |
| |
| <------><------>rcu_qs(); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| void exit_rcu(void) |
| { |
| } |
| |
| |
| |
| |
| static void |
| dump_blkd_tasks(struct rcu_node *rnp, int ncheck) |
| { |
| <------>WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); |
| } |
| |
| #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
| |
| |
| |
| |
| static void rcu_cpu_kthread_setup(unsigned int cpu) |
| { |
| #ifdef CONFIG_RCU_BOOST |
| <------>struct sched_param sp; |
| |
| <------>sp.sched_priority = kthread_prio; |
| <------>sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); |
| #endif |
| } |
| |
| #ifdef CONFIG_RCU_BOOST |
| |
| |
| |
| |
| |
| |
| |
| |
| |
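| /* |
|  * Priority-boost the first task on this rcu_node that is blocking the |
|  * expedited grace period (preferentially) or the normal grace period. |
|  * Returns nonzero if tasks possibly remain in need of boosting. |
|  */ |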
| static int rcu_boost(struct rcu_node *rnp) |
| { |
| <------>unsigned long flags; |
| <------>struct task_struct *t; |
| <------>struct list_head *tb; |
| |
| <------>if (READ_ONCE(rnp->exp_tasks) == NULL && |
| <------> READ_ONCE(rnp->boost_tasks) == NULL) |
| <------><------>return 0; |
| |
| <------>raw_spin_lock_irqsave_rcu_node(rnp, flags); |
| |
| <------>/* |
| <------> * Recheck under the lock: all tasks in need of boosting |
| <------> * might exit their RCU read-side critical sections on their own. |
| <------> */ |
| <------>if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { |
| <------><------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------><------>return 0; |
| <------>} |
| |
| <------>/* |
| <------> * Preferentially boost tasks blocking expedited grace periods. |
| <------> * This cannot starve the normal grace periods because a second |
| <------> * expedited grace period must boost all blocked tasks, including |
| <------> * those blocking the pre-existing normal grace period. |
| <------> */ |
| <------>if (rnp->exp_tasks != NULL) |
| <------><------>tb = rnp->exp_tasks; |
| <------>else |
| <------><------>tb = rnp->boost_tasks; |
| |
| <------>/* |
| <------> * We boost task t by manufacturing an rt_mutex that appears to |
| <------> * be held by task t. We leave a pointer to that rt_mutex where |
| <------> * task t can find it, and task t will release the mutex when it |
| <------> * exits its outermost RCU read-side critical section. Then |
| <------> * simply acquiring this artificial rt_mutex will boost task |
| <------> * t's priority. (Thanks to tglx for suggesting this approach!) |
| <------> * |
| <------> * Note that task t must acquire rnp->lock to remove itself from |
| <------> * the ->blkd_tasks list, which it will do from exit() if from |
| <------> * nowhere else. We therefore are guaranteed that task t will |
| <------> * stay around at least until we drop rnp->lock. Note that |
| <------> * rnp->lock also resolves races between our priority boosting |
| <------> * and task t's exiting its outermost RCU read-side critical |
| <------> * section. |
| <------> */ |
| <------>t = container_of(tb, struct task_struct, rcu_node_entry); |
| <------>rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); |
| <------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------>/* Lock only for side effect: boosts task t's priority. */ |
| <------>rt_mutex_lock(&rnp->boost_mtx); |
| <------>rt_mutex_unlock(&rnp->boost_mtx); |
| |
| <------>return READ_ONCE(rnp->exp_tasks) != NULL || |
| <------> READ_ONCE(rnp->boost_tasks) != NULL; |
| } |
| |
| |
| |
| |
| static int rcu_boost_kthread(void *arg) |
| { |
| <------>struct rcu_node *rnp = (struct rcu_node *)arg; |
| <------>int spincnt = 0; |
| <------>int more2boost; |
| |
| <------>trace_rcu_utilization(TPS("Start boost kthread@init")); |
| <------>for (;;) { |
| <------><------>WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); |
| <------><------>trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); |
| <------><------>rcu_wait(READ_ONCE(rnp->boost_tasks) || |
| <------><------><------> READ_ONCE(rnp->exp_tasks)); |
| <------><------>trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); |
| <------><------>WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); |
| <------><------>more2boost = rcu_boost(rnp); |
| <------><------>if (more2boost) |
| <------><------><------>spincnt++; |
| <------><------>else |
| <------><------><------>spincnt = 0; |
| <------><------>if (spincnt > 10) { |
| <------><------><------>WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); |
| <------><------><------>trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); |
| <------><------><------>schedule_timeout_idle(2); |
| <------><------><------>trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); |
| <------><------><------>spincnt = 0; |
| <------><------>} |
| <------>} |
| <------>/* NOTREACHED */ |
| <------>trace_rcu_utilization(TPS("End boost kthread@notreached")); |
| <------>return 0; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
| <------>__releases(rnp->lock) |
| { |
| <------>raw_lockdep_assert_held_rcu_node(rnp); |
| <------>if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { |
| <------><------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------><------>return; |
| <------>} |
| <------>if (rnp->exp_tasks != NULL || |
| <------> (rnp->gp_tasks != NULL && |
| <------> rnp->boost_tasks == NULL && |
| <------> rnp->qsmask == 0 && |
| <------> (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) { |
| <------><------>if (rnp->exp_tasks == NULL) |
| <------><------><------>WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); |
| <------><------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------><------>rcu_wake_cond(rnp->boost_kthread_task, |
| <------><------><------> READ_ONCE(rnp->boost_kthread_status)); |
| <------>} else { |
| <------><------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| static bool rcu_is_callbacks_kthread(void) |
| { |
| <------>return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; |
| } |
| |
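| /* Convert the configured boost delay from milliseconds to jiffies. */ |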
| #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) |
| |
| |
| |
| |
| static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
| { |
| <------>rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; |
| } |
| |
| |
| |
| |
| |
| |
| static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp) |
| { |
| <------>int rnp_index = rnp - rcu_get_root(); |
| <------>unsigned long flags; |
| <------>struct sched_param sp; |
| <------>struct task_struct *t; |
| |
| <------>if (!IS_ENABLED(CONFIG_PREEMPT_RCU)) |
| <------><------>return; |
| |
| <------>if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) |
| <------><------>return; |
| |
| <------>rcu_state.boost = 1; |
| |
| <------>if (rnp->boost_kthread_task != NULL) |
| <------><------>return; |
| |
| <------>t = kthread_create(rcu_boost_kthread, (void *)rnp, |
| <------><------><------> "rcub/%d", rnp_index); |
| <------>if (WARN_ON_ONCE(IS_ERR(t))) |
| <------><------>return; |
| |
| <------>raw_spin_lock_irqsave_rcu_node(rnp, flags); |
| <------>rnp->boost_kthread_task = t; |
| <------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| <------>sp.sched_priority = kthread_prio; |
| <------>sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| <------>wake_up_process(t); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) |
| { |
| <------>struct task_struct *t = rnp->boost_kthread_task; |
| <------>unsigned long mask = rcu_rnp_online_cpus(rnp); |
| <------>cpumask_var_t cm; |
| <------>int cpu; |
| |
| <------>if (!t) |
| <------><------>return; |
| <------>if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) |
| <------><------>return; |
| <------>for_each_leaf_node_possible_cpu(rnp, cpu) |
| <------><------>if ((mask & leaf_node_cpu_bit(rnp, cpu)) && |
| <------><------> cpu != outgoingcpu) |
| <------><------><------>cpumask_set_cpu(cpu, cm); |
| <------>if (cpumask_weight(cm) == 0) |
| <------><------>cpumask_setall(cm); |
| <------>set_cpus_allowed_ptr(t, cm); |
| <------>free_cpumask_var(cm); |
| } |
| |
| |
| |
| |
| static void __init rcu_spawn_boost_kthreads(void) |
| { |
| <------>struct rcu_node *rnp; |
| |
| <------>rcu_for_each_leaf_node(rnp) |
| <------><------>rcu_spawn_one_boost_kthread(rnp); |
| } |
| |
| static void rcu_prepare_kthreads(int cpu) |
| { |
| <------>struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); |
| <------>struct rcu_node *rnp = rdp->mynode; |
| |
| <------>/* Spawn this leaf rcu_node's boost kthread once the scheduler is up. */ |
| <------>if (rcu_scheduler_fully_active) |
| <------><------>rcu_spawn_one_boost_kthread(rnp); |
| } |
| |
| #else /* #ifdef CONFIG_RCU_BOOST */ |
| |
| static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) |
| <------>__releases(rnp->lock) |
| { |
| <------>raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
| } |
| |
| static bool rcu_is_callbacks_kthread(void) |
| { |
| <------>return false; |
| } |
| |
| static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
| { |
| } |
| |
| static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) |
| { |
| } |
| |
| static void __init rcu_spawn_boost_kthreads(void) |
| { |
| } |
| |
| static void rcu_prepare_kthreads(int cpu) |
| { |
| } |
| |
| #endif /* #else #ifdef CONFIG_RCU_BOOST */ |
| |
| #if !defined(CONFIG_RCU_FAST_NO_HZ) |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
| { |
| <------>*nextevt = KTIME_MAX; |
| <------>return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) && |
| <------> !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist); |
| } |
| |
| |
| |
| |
| |
| static void rcu_cleanup_after_idle(void) |
| { |
| } |
| |
| |
| |
| |
| |
| static void rcu_prepare_for_idle(void) |
| { |
| } |
| |
| #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
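| /* |
|  * Under CONFIG_RCU_FAST_NO_HZ, a CPU entering idle with callbacks pending |
|  * requests a wakeup roughly this many jiffies out (see rcu_needs_cpu()) |
|  * so that grace periods can continue to make progress. |
|  */ |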
| #define RCU_IDLE_GP_DELAY 4 |
| |
| static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY; |
| module_param(rcu_idle_gp_delay, int, 0644); |
| |
| |
| |
| |
| |
| |
| static bool __maybe_unused rcu_try_advance_all_cbs(void) |
| { |
| <------>bool cbs_ready = false; |
| <------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| <------>struct rcu_node *rnp; |
| |
| <------>/* Exit early if callbacks were already advanced during this jiffy. */ |
| <------>if (jiffies == rdp->last_advance_all) |
| <------><------>return false; |
| <------>rdp->last_advance_all = jiffies; |
| |
| <------>rnp = rdp->mynode; |
| |
| <------>/* |
| <------> * Don't bother checking unless a grace period has |
| <------> * completed since we last checked and there are |
| <------> * callbacks not yet ready to invoke. |
| <------> */ |
| <------>if ((rcu_seq_completed_gp(rdp->gp_seq, |
| <------><------><------><------> rcu_seq_current(&rnp->gp_seq)) || |
| <------> unlikely(READ_ONCE(rdp->gpwrap))) && |
| <------> rcu_segcblist_pend_cbs(&rdp->cblist)) |
| <------><------>note_gp_changes(rdp); |
| |
| <------>if (rcu_segcblist_ready_cbs(&rdp->cblist)) |
| <------><------>cbs_ready = true; |
| <------>return cbs_ready; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| int rcu_needs_cpu(u64 basemono, u64 *nextevt) |
| { |
| <------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| <------>unsigned long dj; |
| |
| <------>lockdep_assert_irqs_disabled(); |
| |
| <------>/* If there are no non-offloaded callbacks, RCU does not need this CPU. */ |
| <------>if (rcu_segcblist_empty(&rdp->cblist) || |
| <------> rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) { |
| <------><------>*nextevt = KTIME_MAX; |
| <------><------>return 0; |
| <------>} |
| |
| <------>/* Attempt to advance callbacks. */ |
| <------>if (rcu_try_advance_all_cbs()) { |
| <------><------>/* Some are ready to invoke, so arrange for the core to run them. */ |
| <------><------>invoke_rcu_core(); |
| <------><------>return 1; |
| <------>} |
| <------>rdp->last_accelerate = jiffies; |
| |
| <------>/* Round the wakeup time up to a multiple of rcu_idle_gp_delay. */ |
| <------>dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; |
| |
| <------>*nextevt = basemono + dj * TICK_NSEC; |
| <------>return 0; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_prepare_for_idle(void) |
| { |
| <------>bool needwake; |
| <------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| <------>struct rcu_node *rnp; |
| <------>int tne; |
| |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (rcu_segcblist_is_offloaded(&rdp->cblist)) |
| <------><------>return; |
| |
| <------>/* Handle nohz enablement switches conservatively. */ |
| <------>tne = READ_ONCE(tick_nohz_active); |
| <------>if (tne != rdp->tick_nohz_enabled_snap) { |
| <------><------>if (!rcu_segcblist_empty(&rdp->cblist)) |
| <------><------><------>invoke_rcu_core(); |
| <------><------>rdp->tick_nohz_enabled_snap = tne; |
| <------><------>return; |
| <------>} |
| <------>if (!tne) |
| <------><------>return; |
| |
| <------>/* |
| <------> * If we have not yet accelerated this jiffy, accelerate all |
| <------> * callbacks on this CPU. |
| <------> */ |
| <------>if (rdp->last_accelerate == jiffies) |
| <------><------>return; |
| <------>rdp->last_accelerate = jiffies; |
| <------>if (rcu_segcblist_pend_cbs(&rdp->cblist)) { |
| <------><------>rnp = rdp->mynode; |
| <------><------>raw_spin_lock_rcu_node(rnp); |
| <------><------>needwake = rcu_accelerate_cbs(rnp, rdp); |
| <------><------>raw_spin_unlock_rcu_node(rnp); |
| <------><------>if (needwake) |
| <------><------><------>rcu_gp_kthread_wake(); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| |
| static void rcu_cleanup_after_idle(void) |
| { |
| <------>struct rcu_data *rdp = this_cpu_ptr(&rcu_data); |
| |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (rcu_segcblist_is_offloaded(&rdp->cblist)) |
| <------><------>return; |
| <------>if (rcu_try_advance_all_cbs()) |
| <------><------>invoke_rcu_core(); |
| } |
| |
| #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |
| |
| #ifdef CONFIG_RCU_NOCB_CPU |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static int __init rcu_nocb_setup(char *str) |
| { |
| <------>alloc_bootmem_cpumask_var(&rcu_nocb_mask); |
| <------>if (!strcasecmp(str, "all")) |
| <------><------>cpumask_setall(rcu_nocb_mask); |
| <------>else |
| <------><------>if (cpulist_parse(str, rcu_nocb_mask)) { |
| <------><------><------>pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); |
| <------><------><------>cpumask_setall(rcu_nocb_mask); |
| <------><------>} |
| <------>return 1; |
| } |
| __setup("rcu_nocbs=", rcu_nocb_setup); |
| |
| static int __init parse_rcu_nocb_poll(char *arg) |
| { |
| <------>rcu_nocb_poll = true; |
| <------>return 0; |
| } |
| early_param("rcu_nocb_poll", parse_rcu_nocb_poll); |
| |
| |
| |
| |
| |
| |
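| /* |
|  * Maximum number of direct ->cblist enqueues per jiffy on a no-CBs CPU |
|  * before further callbacks are diverted to the ->nocb_bypass list; see |
|  * rcu_nocb_try_bypass() below. |
|  */ |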
| int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ; |
| module_param(nocb_nobypass_lim_per_jiffy, int, 0); |
| |
| |
| |
| |
| |
| |
| static void rcu_nocb_bypass_lock(struct rcu_data *rdp) |
| <------>__acquires(&rdp->nocb_bypass_lock) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (raw_spin_trylock(&rdp->nocb_bypass_lock)) |
| <------><------>return; |
| <------>atomic_inc(&rdp->nocb_lock_contended); |
| <------>WARN_ON_ONCE(smp_processor_id() != rdp->cpu); |
| <------>smp_mb__after_atomic(); |
| <------>raw_spin_lock(&rdp->nocb_bypass_lock); |
| <------>smp_mb__before_atomic(); |
| <------>atomic_dec(&rdp->nocb_lock_contended); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static void rcu_nocb_wait_contended(struct rcu_data *rdp) |
| { |
| <------>WARN_ON_ONCE(smp_processor_id() != rdp->cpu); |
| <------>while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) |
| <------><------>cpu_relax(); |
| } |
| |
| |
| |
| |
| |
| static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| <------>return raw_spin_trylock(&rdp->nocb_bypass_lock); |
| } |
| |
| |
| |
| |
| static void rcu_nocb_bypass_unlock(struct rcu_data *rdp) |
| <------>__releases(&rdp->nocb_bypass_lock) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| <------>raw_spin_unlock(&rdp->nocb_bypass_lock); |
| } |
| |
| |
| |
| |
| |
| static void rcu_nocb_lock(struct rcu_data *rdp) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (!rcu_segcblist_is_offloaded(&rdp->cblist)) |
| <------><------>return; |
| <------>raw_spin_lock(&rdp->nocb_lock); |
| } |
| |
| |
| |
| |
| |
| static void rcu_nocb_unlock(struct rcu_data *rdp) |
| { |
| <------>if (rcu_segcblist_is_offloaded(&rdp->cblist)) { |
| <------><------>lockdep_assert_irqs_disabled(); |
| <------><------>raw_spin_unlock(&rdp->nocb_lock); |
| <------>} |
| } |
| |
| |
| |
| |
| |
| static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, |
| <------><------><------><------> unsigned long flags) |
| { |
| <------>if (rcu_segcblist_is_offloaded(&rdp->cblist)) { |
| <------><------>lockdep_assert_irqs_disabled(); |
| <------><------>raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); |
| <------>} else { |
| <------><------>local_irq_restore(flags); |
| <------>} |
| } |
| |
| |
| static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| <------>if (rcu_segcblist_is_offloaded(&rdp->cblist)) |
| <------><------>lockdep_assert_held(&rdp->nocb_lock); |
| } |
| |
| |
| |
| |
| |
| static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) |
| { |
| <------>swake_up_all(sq); |
| } |
| |
| static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) |
| { |
| <------>return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1]; |
| } |
| |
| static void rcu_init_one_nocb(struct rcu_node *rnp) |
| { |
| <------>init_swait_queue_head(&rnp->nocb_gp_wq[0]); |
| <------>init_swait_queue_head(&rnp->nocb_gp_wq[1]); |
| } |
| |
| |
| bool rcu_is_nocb_cpu(int cpu) |
| { |
| <------>if (cpumask_available(rcu_nocb_mask)) |
| <------><------>return cpumask_test_cpu(cpu, rcu_nocb_mask); |
| <------>return false; |
| } |
| |
| |
| |
| |
| |
| static void wake_nocb_gp(struct rcu_data *rdp, bool force, |
| <------><------><------> unsigned long flags) |
| <------>__releases(rdp->nocb_lock) |
| { |
| <------>bool needwake = false; |
| <------>struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; |
| |
| <------>lockdep_assert_held(&rdp->nocb_lock); |
| <------>if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) { |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------> TPS("AlreadyAwake")); |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>return; |
| <------>} |
| |
| <------>if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) { |
| <------><------>WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); |
| <------><------>del_timer(&rdp->nocb_timer); |
| <------>} |
| <------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------>raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); |
| <------>if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { |
| <------><------>WRITE_ONCE(rdp_gp->nocb_gp_sleep, false); |
| <------><------>needwake = true; |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); |
| <------>} |
| <------>raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); |
| <------>if (needwake) |
| <------><------>wake_up_process(rdp_gp->nocb_gp_kthread); |
| } |
| |
| |
| |
| |
| |
| static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, |
| <------><------><------> const char *reason) |
| { |
| <------>if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) |
| <------><------>mod_timer(&rdp->nocb_timer, jiffies + 1); |
| <------>if (rdp->nocb_defer_wakeup < waketype) |
| <------><------>WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); |
| <------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, |
| <------><------><------><------> unsigned long j) |
| { |
| <------>struct rcu_cblist rcl; |
| |
| <------>WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist)); |
| <------>rcu_lockdep_assert_cblist_protected(rdp); |
| <------>lockdep_assert_held(&rdp->nocb_bypass_lock); |
| <------>if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { |
| <------><------>raw_spin_unlock(&rdp->nocb_bypass_lock); |
| <------><------>return false; |
| <------>} |
| <------>/* Note: ->cblist.len already accounts for the callbacks in ->nocb_bypass. */ |
| <------>if (rhp) |
| <------><------>rcu_segcblist_inc_len(&rdp->cblist); |
| <------>rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); |
| <------>rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); |
| <------>WRITE_ONCE(rdp->nocb_bypass_first, j); |
| <------>rcu_nocb_bypass_unlock(rdp); |
| <------>return true; |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, |
| <------><------><------><------> unsigned long j) |
| { |
| <------>if (!rcu_segcblist_is_offloaded(&rdp->cblist)) |
| <------><------>return true; |
| <------>rcu_lockdep_assert_cblist_protected(rdp); |
| <------>rcu_nocb_bypass_lock(rdp); |
| <------>return rcu_nocb_do_flush_bypass(rdp, rhp, j); |
| } |
| |
| |
| |
| |
| |
| static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j) |
| { |
| <------>rcu_lockdep_assert_cblist_protected(rdp); |
| <------>if (!rcu_segcblist_is_offloaded(&rdp->cblist) || |
| <------> !rcu_nocb_bypass_trylock(rdp)) |
| <------><------>return; |
| <------>WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j)); |
| } |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
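| /* |
|  * Decide whether the newly arrived callback "rhp" should be placed on the |
|  * ->nocb_bypass list rather than on ->cblist.  Returns true if it was |
|  * queued on the bypass list, or false if the caller should enqueue it |
|  * onto ->cblist itself, in which case *was_alldone indicates whether |
|  * ->cblist had no pending callbacks. |
|  */ |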
| static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, |
| <------><------><------><------>bool *was_alldone, unsigned long flags) |
| { |
| <------>unsigned long c; |
| <------>unsigned long cur_gp_seq; |
| <------>unsigned long j = jiffies; |
| <------>long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); |
| |
| <------>if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { |
| <------><------>*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); |
| <------><------>return false; |
| <------>} |
| <------>lockdep_assert_irqs_disabled(); |
| |
| <------>/* Don't use ->nocb_bypass during early boot. */ |
| <------>if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) { |
| <------><------>rcu_nocb_lock(rdp); |
| <------><------>WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); |
| <------><------>*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); |
| <------><------>return false; |
| <------>} |
| |
| <------>/* If we have advanced to a new jiffy, decay the count so callbacks |
| <------> * can move back from ->nocb_bypass to ->cblist. */ |
| <------>if (j == rdp->nocb_nobypass_last) { |
| <------><------>c = rdp->nocb_nobypass_count + 1; |
| <------>} else { |
| <------><------>WRITE_ONCE(rdp->nocb_nobypass_last, j); |
| <------><------>c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; |
| <------><------>if (ULONG_CMP_LT(rdp->nocb_nobypass_count, |
| <------><------><------><------> nocb_nobypass_lim_per_jiffy)) |
| <------><------><------>c = 0; |
| <------><------>else if (c > nocb_nobypass_lim_per_jiffy) |
| <------><------><------>c = nocb_nobypass_lim_per_jiffy; |
| <------>} |
| <------>WRITE_ONCE(rdp->nocb_nobypass_count, c); |
| |
| <------>/* If there have not been many ->cblist enqueues this jiffy, tell the |
| <------> * caller to enqueue onto ->cblist, but flush ->nocb_bypass first. */ |
| <------>if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) { |
| <------><------>rcu_nocb_lock(rdp); |
| <------><------>*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); |
| <------><------>if (*was_alldone) |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("FirstQ")); |
| <------><------>WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j)); |
| <------><------>WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); |
| <------><------>return false; |
| <------>} |
| |
| <------>/* If ->nocb_bypass has been used too long or has grown too big, |
| <------> * flush it into ->cblist along with the new callback. */ |
| <------>if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) || |
| <------> ncbs >= qhimark) { |
| <------><------>rcu_nocb_lock(rdp); |
| <------><------>if (!rcu_nocb_flush_bypass(rdp, rhp, j)) { |
| <------><------><------>*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); |
| <------><------><------>if (*was_alldone) |
| <------><------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------><------> TPS("FirstQ")); |
| <------><------><------>WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); |
| <------><------><------>return false; |
| <------><------>} |
| <------><------>if (j != rdp->nocb_gp_adv_time && |
| <------><------> rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && |
| <------><------> rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { |
| <------><------><------>rcu_advance_cbs_nowake(rdp->mynode, rdp); |
| <------><------><------>rdp->nocb_gp_adv_time = j; |
| <------><------>} |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>return true; |
| <------>} |
| |
| <------>/* We need to use the bypass. */ |
| <------>rcu_nocb_wait_contended(rdp); |
| <------>rcu_nocb_bypass_lock(rdp); |
| <------>ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); |
| <------>rcu_segcblist_inc_len(&rdp->cblist); |
| <------>rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); |
| <------>if (!ncbs) { |
| <------><------>WRITE_ONCE(rdp->nocb_bypass_first, j); |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ")); |
| <------>} |
| <------>rcu_nocb_bypass_unlock(rdp); |
| <------>smp_mb(); /* Order enqueue before subsequent wakeup checks. */ |
| <------>if (ncbs) { |
| <------><------>local_irq_restore(flags); |
| <------>} else { |
| <------><------>/* Bypass list was empty: the no-CBs GP kthread may need a wakeup. */ |
| <------><------>rcu_nocb_lock(rdp); |
| <------><------>if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("FirstBQwake")); |
| <------><------><------>__call_rcu_nocb_wake(rdp, true, flags); |
| <------><------>} else { |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("FirstBQnoWake")); |
| <------><------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>} |
| <------>} |
| <------>return true; |
| } |
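| |
| /* |
|  * The per-jiffy counting near the top of rcu_nocb_try_bypass() decides |
|  * whether call_rcu() traffic is heavy enough to justify ->nocb_bypass. |
|  * The stand-alone sketch below (illustrative only: invented names, not |
|  * part of the build) restates that update rule on plain variables, using |
|  * ordinary comparisons in place of ULONG_CMP_LT(). |
|  */ |
| #if 0 |
| static unsigned long example_nobypass_count(unsigned long count, |
| <------><------><------><------><------>    unsigned long *lastj, |
| <------><------><------><------><------>    unsigned long j, |
| <------><------><------><------><------>    unsigned long lim) |
| { |
| <------>unsigned long c; |
| |
| <------>if (j == *lastj) { |
| <------><------>c = count + 1;  /* Same jiffy: one more enqueue. */ |
| <------>} else { |
| <------><------>*lastj = j;     /* New jiffy: decay the old count. */ |
| <------><------>if (count < lim) |
| <------><------><------>c = 0; |
| <------><------>else if (count - lim > lim) |
| <------><------><------>c = lim; /* Saturate at the per-jiffy limit. */ |
| <------><------>else |
| <------><------><------>c = count - lim; |
| <------>} |
| <------>return c; |
| } |
| #endif |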
| |
| /* |
|  * Awaken the no-CBs grace-period kthread if needed, either because the |
|  * newly enqueued callback was the first pending one or because callbacks |
|  * have piled up.  The caller must hold ->nocb_lock, which this function |
|  * releases. |
|  */ |
| static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, |
| <------><------><------><------> unsigned long flags) |
| <------><------><------><------> __releases(rdp->nocb_lock) |
| { |
| <------>unsigned long cur_gp_seq; |
| <------>unsigned long j; |
| <------>long len; |
| <------>struct task_struct *t; |
| |
| <------>/* If we are being polled or there is no GP kthread, just leave. */ |
| <------>t = READ_ONCE(rdp->nocb_gp_kthread); |
| <------>if (rcu_nocb_poll || !t) { |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------> TPS("WakeNotPoll")); |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>return; |
| <------>} |
| <------>/* Decide whether and how urgently to wake the GP kthread. */ |
| <------>len = rcu_segcblist_n_cbs(&rdp->cblist); |
| <------>if (was_alldone) { |
| <------><------>rdp->qlen_last_fqs_check = len; |
| <------><------>if (!irqs_disabled_flags(flags)) { |
| <------><------><------>/* Interrupts are enabled, so wake the GP kthread now. */ |
| <------><------><------>wake_nocb_gp(rdp, false, flags); |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("WakeEmpty")); |
| <------><------>} else { |
| <------><------><------>wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, |
| <------><------><------><------><------> TPS("WakeEmptyIsDeferred")); |
| <------><------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>} |
| <------>} else if (len > rdp->qlen_last_fqs_check + qhimark) { |
| <------><------>/* The queue has grown by more than qhimark callbacks. */ |
| <------><------>rdp->qlen_last_fqs_check = len; |
| <------><------>j = jiffies; |
| <------><------>if (j != rdp->nocb_gp_adv_time && |
| <------><------> rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && |
| <------><------> rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { |
| <------><------><------>rcu_advance_cbs_nowake(rdp->mynode, rdp); |
| <------><------><------>rdp->nocb_gp_adv_time = j; |
| <------><------>} |
| <------><------>smp_mb(); |
| <------><------>if ((rdp->nocb_cb_sleep || |
| <------><------> !rcu_segcblist_ready_cbs(&rdp->cblist)) && |
| <------><------> !timer_pending(&rdp->nocb_bypass_timer)) |
| <------><------><------>wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, |
| <------><------><------><------><------> TPS("WakeOvfIsDeferred")); |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------>} else { |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------>} |
| <------>return; |
| } |
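| |
| /* |
|  * Rough decision table implemented by __call_rcu_nocb_wake(), shown as a |
|  * stand-alone sketch (illustrative only: invented names, not part of the |
|  * build).  The real function also consults ->nocb_cb_sleep, the ready |
|  * list, and the bypass timer before forcing a deferred wakeup. |
|  */ |
| #if 0 |
| enum example_wake_action { |
| <------>EX_WAKE_NOW,    /* Queue was empty and interrupts are enabled. */ |
| <------>EX_WAKE_DEFER,  /* Queue was empty but interrupts are disabled. */ |
| <------>EX_WAKE_FORCE,  /* Queue grew by more than qhimark callbacks. */ |
| <------>EX_NO_WAKE,     /* GP kthread already knows about this work. */ |
| }; |
| |
| static enum example_wake_action |
| example_nocb_wake_choice(bool was_alldone, bool irqs_disabled, |
| <------><------><------> long len, long last_check, long qhimark) |
| { |
| <------>if (was_alldone) |
| <------><------>return irqs_disabled ? EX_WAKE_DEFER : EX_WAKE_NOW; |
| <------>if (len > last_check + qhimark) |
| <------><------>return EX_WAKE_FORCE; |
| <------>return EX_NO_WAKE; |
| } |
| #endif |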
| |
| /* Wake the no-CBs GP kthread when the bypass timer fires. */ |
| static void do_nocb_bypass_wakeup_timer(struct timer_list *t) |
| { |
| <------>unsigned long flags; |
| <------>struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer); |
| |
| <------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); |
| <------>rcu_nocb_lock_irqsave(rdp, flags); |
| <------>smp_mb__after_spinlock(); |
| <------>__call_rcu_nocb_wake(rdp, true, flags); |
| } |
| |
| /* |
|  * No-CBs GP kthreads come here to wait for additional callbacks to show |
|  * up and for grace periods to end. |
|  */ |
| static void nocb_gp_wait(struct rcu_data *my_rdp) |
| { |
| <------>bool bypass = false; |
| <------>long bypass_ncbs; |
| <------>int __maybe_unused cpu = my_rdp->cpu; |
| <------>unsigned long cur_gp_seq; |
| <------>unsigned long flags; |
| <------>bool gotcbs = false; |
| <------>unsigned long j = jiffies; |
| <------>bool needwait_gp = false; |
| <------>bool needwake; |
| <------>bool needwake_gp; |
| <------>struct rcu_data *rdp; |
| <------>struct rcu_node *rnp; |
| <------>unsigned long wait_gp_seq = 0; |
| <------>bool wasempty = false; |
| |
| <------>/* |
| <------> * Each pass through the following loop checks for CBs and for the |
| <------> * nearest grace period (if any) to wait for next. The CB kthreads |
| <------> * and the global grace-period kthread are awakened if needed. |
| <------> */ |
| <------>WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); |
| <------>for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); |
| <------><------>rcu_nocb_lock_irqsave(rdp, flags); |
| <------><------>bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); |
| <------><------>if (bypass_ncbs && |
| <------><------> (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || |
| <------><------> bypass_ncbs > 2 * qhimark)) { |
| <------><------><------>/* Bypass list is stale or too long, so try to flush it. */ |
| <------><------><------>(void)rcu_nocb_try_flush_bypass(rdp, j); |
| <------><------><------>bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); |
| <------><------>} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { |
| <------><------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------><------>continue; |
| <------><------>} |
| <------><------>if (bypass_ncbs) { |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("Bypass")); |
| <------><------><------>bypass = true; |
| <------><------>} |
| <------><------>rnp = rdp->mynode; |
| <------><------>if (bypass) { |
| <------><------><------>WRITE_ONCE(my_rdp->nocb_defer_wakeup, |
| <------><------><------><------> RCU_NOCB_WAKE_NOT); |
| <------><------><------>del_timer(&my_rdp->nocb_timer); |
| <------><------>} |
| <------><------>/* Advance callbacks if doing so might be helpful. */ |
| <------><------>needwake_gp = false; |
| <------><------>if (!rcu_segcblist_restempty(&rdp->cblist, |
| <------><------><------><------><------> RCU_NEXT_READY_TAIL) || |
| <------><------> (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && |
| <------><------> rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) { |
| <------><------><------>raw_spin_lock_rcu_node(rnp); |
| <------><------><------>needwake_gp = rcu_advance_cbs(rnp, rdp); |
| <------><------><------>wasempty = rcu_segcblist_restempty(&rdp->cblist, |
| <------><------><------><------><------><------><------> RCU_NEXT_READY_TAIL); |
| <------><------><------>raw_spin_unlock_rcu_node(rnp); |
| <------><------>} |
| <------><------>/* Check whether this CPU still needs to wait for a grace period. */ |
| <------><------>WARN_ON_ONCE(wasempty && |
| <------><------><------> !rcu_segcblist_restempty(&rdp->cblist, |
| <------><------><------><------><------><------> RCU_NEXT_READY_TAIL)); |
| <------><------>if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) { |
| <------><------><------>if (!needwait_gp || |
| <------><------><------> ULONG_CMP_LT(cur_gp_seq, wait_gp_seq)) |
| <------><------><------><------>wait_gp_seq = cur_gp_seq; |
| <------><------><------>needwait_gp = true; |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, |
| <------><------><------><------><------> TPS("NeedWaitGP")); |
| <------><------>} |
| <------><------>if (rcu_segcblist_ready_cbs(&rdp->cblist)) { |
| <------><------><------>needwake = rdp->nocb_cb_sleep; |
| <------><------><------>WRITE_ONCE(rdp->nocb_cb_sleep, false); |
| <------><------><------>smp_mb(); |
| <------><------>} else { |
| <------><------><------>needwake = false; |
| <------><------>} |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>if (needwake) { |
| <------><------><------>swake_up_one(&rdp->nocb_cb_wq); |
| <------><------><------>gotcbs = true; |
| <------><------>} |
| <------><------>if (needwake_gp) |
| <------><------><------>rcu_gp_kthread_wake(); |
| <------>} |
| |
| <------>my_rdp->nocb_gp_bypass = bypass; |
| <------>my_rdp->nocb_gp_gp = needwait_gp; |
| <------>my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0; |
| <------>if (bypass && !rcu_nocb_poll) { |
| <------><------>/* |
| <------><------> * At least one rdp has bypass callbacks, so arm the bypass |
| <------><------> * timer to make sure they are eventually flushed and invoked. |
| <------><------> */ |
| <------><------>raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); |
| <------><------>mod_timer(&my_rdp->nocb_bypass_timer, j + 2); |
| <------><------>raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); |
| <------>} |
| <------>if (rcu_nocb_poll) { |
| <------><------>/* Polling, so trace activity (if any) and nap for a jiffy. */ |
| <------><------>if (gotcbs) |
| <------><------><------>trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll")); |
| <------><------>schedule_timeout_idle(1); |
| <------>} else if (!needwait_gp) { |
| <------><------>/* No grace period to wait for, so sleep until callbacks arrive. */ |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep")); |
| <------><------>swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq, |
| <------><------><------><------>!READ_ONCE(my_rdp->nocb_gp_sleep)); |
| <------><------>trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep")); |
| <------>} else { |
| <------><------>rnp = my_rdp->mynode; |
| <------><------>trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait")); |
| <------><------>swait_event_interruptible_exclusive( |
| <------><------><------>rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1], |
| <------><------><------>rcu_seq_done(&rnp->gp_seq, wait_gp_seq) || |
| <------><------><------>!READ_ONCE(my_rdp->nocb_gp_sleep)); |
| <------><------>trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait")); |
| <------>} |
| <------>if (!rcu_nocb_poll) { |
| <------><------>raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags); |
| <------><------>if (bypass) |
| <------><------><------>del_timer(&my_rdp->nocb_bypass_timer); |
| <------><------>WRITE_ONCE(my_rdp->nocb_gp_sleep, true); |
| <------><------>raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags); |
| <------>} |
| <------>my_rdp->nocb_gp_seq = -1; |
| <------>WARN_ON(signal_pending(current)); |
| } |
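| |
| /* |
|  * The flush heuristic near the top of nocb_gp_wait() can be read as the |
|  * stand-alone predicate below (illustrative only: invented name, not part |
|  * of the build): a CPU's bypass list is flushed into ->cblist once it is |
|  * more than one jiffy old or holds more than 2 * qhimark callbacks. |
|  */ |
| #if 0 |
| static bool example_bypass_needs_flush(long bypass_ncbs, |
| <------><------><------><------><------>unsigned long bypass_first, |
| <------><------><------><------><------>unsigned long j, long qhimark) |
| { |
| <------>return bypass_ncbs && |
| <------>       (time_after(j, bypass_first + 1) || bypass_ncbs > 2 * qhimark); |
| } |
| #endif |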
| |
| /* |
|  * No-CBs grace-period-wait kthread.  There is one of these per group of |
|  * no-CBs CPUs.  Each pass through the loop invokes nocb_gp_wait(), which |
|  * waits for callbacks to appear on the group's CPUs, waits for any grace |
|  * period those callbacks need, and then wakes the corresponding per-CPU |
|  * callback kthreads so that they can invoke the now-ready callbacks. |
|  */ |
| static int rcu_nocb_gp_kthread(void *arg) |
| { |
| <------>struct rcu_data *rdp = arg; |
| |
| <------>for (;;) { |
| <------><------>WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1); |
| <------><------>nocb_gp_wait(rdp); |
| <------><------>cond_resched_tasks_rcu_qs(); |
| <------>} |
| <------>return 0; |
| } |
| |
| /* |
|  * Invoke any ready callbacks from the corresponding no-CBs CPU, then, if |
|  * there are no more, wait for more to appear. |
|  */ |
| static void nocb_cb_wait(struct rcu_data *rdp) |
| { |
| <------>unsigned long cur_gp_seq; |
| <------>unsigned long flags; |
| <------>bool needwake_gp = false; |
| <------>struct rcu_node *rnp = rdp->mynode; |
| |
| <------>local_irq_save(flags); |
| <------>rcu_momentary_dyntick_idle(); |
| <------>local_irq_restore(flags); |
| <------>local_bh_disable(); |
| <------>rcu_do_batch(rdp); |
| <------>local_bh_enable(); |
| <------>lockdep_assert_irqs_enabled(); |
| <------>rcu_nocb_lock_irqsave(rdp, flags); |
| <------>if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && |
| <------> rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && |
| <------> raw_spin_trylock_rcu_node(rnp)) { |
| <------><------>needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); |
| <------><------>raw_spin_unlock_rcu_node(rnp); |
| <------>} |
| <------>if (rcu_segcblist_ready_cbs(&rdp->cblist)) { |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>if (needwake_gp) |
| <------><------><------>rcu_gp_kthread_wake(); |
| <------><------>return; |
| <------>} |
| |
| <------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); |
| <------>WRITE_ONCE(rdp->nocb_cb_sleep, true); |
| <------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------>if (needwake_gp) |
| <------><------>rcu_gp_kthread_wake(); |
| <------>swait_event_interruptible_exclusive(rdp->nocb_cb_wq, |
| <------><------><------><------> !READ_ONCE(rdp->nocb_cb_sleep)); |
| <------>if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { |
| <------><------>/* The acquire load orders later callback invocation after this test. */ |
| <------><------>return; |
| <------>} |
| <------>WARN_ON(signal_pending(current)); |
| <------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); |
| } |
| |
| /* |
|  * Per-CPU callback-invocation kthread for no-CBs CPUs.  Repeatedly invoke |
|  * ready callbacks via nocb_cb_wait(), waiting as needed for more. |
|  */ |
| static int rcu_nocb_cb_kthread(void *arg) |
| { |
| <------>struct rcu_data *rdp = arg; |
| |
| <------>/* Each pass through this loop invokes one batch of callbacks. */ |
| <------>for (;;) { |
| <------><------>nocb_cb_wait(rdp); |
| <------><------>cond_resched_tasks_rcu_qs(); |
| <------>} |
| <------>return 0; |
| } |
| |
| /* Is a deferred wakeup of the no-CBs GP kthread pending? */ |
| static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) |
| { |
| <------>return READ_ONCE(rdp->nocb_defer_wakeup); |
| } |
| |
| /* Carry out a deferred wakeup of the no-CBs GP kthread. */ |
| static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp) |
| { |
| <------>unsigned long flags; |
| <------>int ndw; |
| |
| <------>rcu_nocb_lock_irqsave(rdp, flags); |
| <------>if (!rcu_nocb_need_deferred_wakeup(rdp)) { |
| <------><------>rcu_nocb_unlock_irqrestore(rdp, flags); |
| <------><------>return; |
| <------>} |
| <------>ndw = READ_ONCE(rdp->nocb_defer_wakeup); |
| <------>wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); |
| <------>trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); |
| } |
| |
| /* Timer handler for deferred wakeups of the no-CBs GP kthread. */ |
| static void do_nocb_deferred_wakeup_timer(struct timer_list *t) |
| { |
| <------>struct rcu_data *rdp = from_timer(rdp, t, nocb_timer); |
| |
| <------>do_nocb_deferred_wakeup_common(rdp); |
| } |
| |
| /* |
|  * Do a deferred wakeup of the no-CBs GP kthread from the fastpath. |
|  * The lockless check here is inexact, but any wakeup it misses will be |
|  * caught by a later locked check or by the deferred-wakeup timer. |
|  */ |
| static void do_nocb_deferred_wakeup(struct rcu_data *rdp) |
| { |
| <------>if (rcu_nocb_need_deferred_wakeup(rdp)) |
| <------><------>do_nocb_deferred_wakeup_common(rdp); |
| } |
| |
| void rcu_nocb_flush_deferred_wakeup(void) |
| { |
| <------>do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data)); |
| } |
| |
| void __init rcu_init_nohz(void) |
| { |
| <------>int cpu; |
| <------>bool need_rcu_nocb_mask = false; |
| <------>struct rcu_data *rdp; |
| |
| #if defined(CONFIG_NO_HZ_FULL) |
| <------>if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask)) |
| <------><------>need_rcu_nocb_mask = true; |
| #endif |
| |
| <------>if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) { |
| <------><------>if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) { |
| <------><------><------>pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n"); |
| <------><------><------>return; |
| <------><------>} |
| <------>} |
| <------>if (!cpumask_available(rcu_nocb_mask)) |
| <------><------>return; |
| |
| #if defined(CONFIG_NO_HZ_FULL) |
| <------>if (tick_nohz_full_running) |
| <------><------>cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask); |
| #endif |
| |
| <------>if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) { |
| <------><------>pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n"); |
| <------><------>cpumask_and(rcu_nocb_mask, cpu_possible_mask, |
| <------><------><------> rcu_nocb_mask); |
| <------>} |
| <------>if (cpumask_empty(rcu_nocb_mask)) |
| <------><------>pr_info("\tOffload RCU callbacks from CPUs: (none).\n"); |
| <------>else |
| <------><------>pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n", |
| <------><------><------>cpumask_pr_args(rcu_nocb_mask)); |
| <------>if (rcu_nocb_poll) |
| <------><------>pr_info("\tPoll for callbacks from no-CBs CPUs.\n"); |
| |
| <------>for_each_cpu(cpu, rcu_nocb_mask) { |
| <------><------>rdp = per_cpu_ptr(&rcu_data, cpu); |
| <------><------>if (rcu_segcblist_empty(&rdp->cblist)) |
| <------><------><------>rcu_segcblist_init(&rdp->cblist); |
| <------><------>rcu_segcblist_offload(&rdp->cblist); |
| <------>} |
| <------>rcu_organize_nocb_kthreads(); |
| } |
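| |
| /* |
|  * Example (illustrative): booting with "rcu_nocbs=1-7" places CPUs 1-7 in |
|  * rcu_nocb_mask, and with CONFIG_NO_HZ_FULL any "nohz_full=" CPUs are ORed |
|  * in above as well, so "nohz_full=1-7" alone has the same offloading |
|  * effect here.  The resulting console line then reads something like |
|  * "Offload RCU callbacks from CPUs: 1-7.". |
|  */ |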
| |
| /* Boot-time initialization of a CPU's no-CBs state. */ |
| static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) |
| { |
| <------>init_swait_queue_head(&rdp->nocb_cb_wq); |
| <------>init_swait_queue_head(&rdp->nocb_gp_wq); |
| <------>raw_spin_lock_init(&rdp->nocb_lock); |
| <------>raw_spin_lock_init(&rdp->nocb_bypass_lock); |
| <------>raw_spin_lock_init(&rdp->nocb_gp_lock); |
| <------>timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0); |
| <------>timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0); |
| <------>rcu_cblist_init(&rdp->nocb_bypass); |
| } |
| |
| /* |
|  * If the specified CPU is a no-CBs CPU that does not already have its |
|  * rcuo CB kthread, spawn it.  Also spawn the group's rcuo GP kthread if |
|  * that has not yet been done. |
|  */ |
| static void rcu_spawn_one_nocb_kthread(int cpu) |
| { |
| <------>struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); |
| <------>struct rcu_data *rdp_gp; |
| <------>struct task_struct *t; |
| |
| <------>/* |
| <------> * If this isn't a no-CBs CPU or if it already has an rcuo kthread, |
| <------> * then nothing to do. |
| <------> */ |
| <------>if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread) |
| <------><------>return; |
| |
| <------>/* If the GP kthread for this CPU's group does not yet exist, spawn it. */ |
| <------>rdp_gp = rdp->nocb_gp_rdp; |
| <------>if (!rdp_gp->nocb_gp_kthread) { |
| <------><------>t = kthread_run(rcu_nocb_gp_kthread, rdp_gp, |
| <------><------><------><------>"rcuog/%d", rdp_gp->cpu); |
| <------><------>if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) |
| <------><------><------>return; |
| <------><------>WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); |
| <------>} |
| |
| <------>/* Spawn the CB kthread for this CPU. */ |
| <------>t = kthread_run(rcu_nocb_cb_kthread, rdp, |
| <------><------><------>"rcuo%c/%d", rcu_state.abbr, cpu); |
| <------>if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__)) |
| <------><------>return; |
| <------>WRITE_ONCE(rdp->nocb_cb_kthread, t); |
| <------>WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); |
| } |
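| |
| /* |
|  * For reference (illustrative; exact names depend on configuration): the |
|  * GP kthread spawned above appears as "rcuog/<cpu>" for the group leader's |
|  * CPU, and each per-CPU CB kthread as "rcuo<abbr>/<cpu>", where <abbr> is |
|  * rcu_state.abbr, e.g. "rcuop/3" on a preemptible kernel. |
|  */ |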
| |
| /* |
|  * If the specified CPU is a no-CBs CPU that does not already have its |
|  * rcuo kthreads, spawn them. |
|  */ |
| static void rcu_spawn_cpu_nocb_kthread(int cpu) |
| { |
| <------>if (rcu_scheduler_fully_active) |
| <------><------>rcu_spawn_one_nocb_kthread(cpu); |
| } |
| |
| /* |
|  * Once the scheduler is running, spawn rcuo kthreads for all online |
|  * no-CBs CPUs.  No-CBs CPUs that come online later get their kthreads |
|  * from rcu_spawn_cpu_nocb_kthread(). |
|  */ |
| static void __init rcu_spawn_nocb_kthreads(void) |
| { |
| <------>int cpu; |
| |
| <------>for_each_online_cpu(cpu) |
| <------><------>rcu_spawn_cpu_nocb_kthread(cpu); |
| } |
| |
| /* Number of no-CBs CPUs per GP kthread; -1 selects roughly sqrt(nr_cpu_ids). */ |
| static int rcu_nocb_gp_stride = -1; |
| module_param(rcu_nocb_gp_stride, int, 0444); |
| |
| /* |
|  * Initialize the GP-to-CB kthread relationships for all no-CBs CPUs. |
|  */ |
| static void __init rcu_organize_nocb_kthreads(void) |
| { |
| <------>int cpu; |
| <------>bool firsttime = true; |
| <------>bool gotnocbs = false; |
| <------>bool gotnocbscbs = true; |
| <------>int ls = rcu_nocb_gp_stride; |
| <------>int nl = 0; |
| <------>struct rcu_data *rdp; |
| <------>struct rcu_data *rdp_gp = NULL; |
| <------>struct rcu_data *rdp_prev = NULL; |
| |
| <------>if (!cpumask_available(rcu_nocb_mask)) |
| <------><------>return; |
| <------>if (ls == -1) { |
| <------><------>ls = nr_cpu_ids / int_sqrt(nr_cpu_ids); |
| <------><------>rcu_nocb_gp_stride = ls; |
| <------>} |
| |
| <------>/* |
| <------> * Each pass through this loop sets up one rcu_data structure. |
| <------> * Should the corresponding CPU come online in the future, then |
| <------> * we will spawn the needed set of rcu_nocb_kthread() kthreads. |
| <------> */ |
| <------>for_each_cpu(cpu, rcu_nocb_mask) { |
| <------><------>rdp = per_cpu_ptr(&rcu_data, cpu); |
| <------><------>if (rdp->cpu >= nl) { |
| <------><------><------>/* This CPU starts a new group, so it gets the GP kthread. */ |
| <------><------><------>gotnocbs = true; |
| <------><------><------>nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; |
| <------><------><------>rdp->nocb_gp_rdp = rdp; |
| <------><------><------>rdp_gp = rdp; |
| <------><------><------>if (dump_tree) { |
| <------><------><------><------>if (!firsttime) |
| <------><------><------><------><------>pr_cont("%s\n", gotnocbscbs |
| <------><------><------><------><------><------><------>? "" : " (self only)"); |
| <------><------><------><------>gotnocbscbs = false; |
| <------><------><------><------>firsttime = false; |
| <------><------><------><------>pr_alert("%s: No-CB GP kthread CPU %d:", |
| <------><------><------><------><------> __func__, cpu); |
| <------><------><------>} |
| <------><------>} else { |
| <------><------><------>/* Another CB CPU, so link it to the current group leader. */ |
| <------><------><------>gotnocbscbs = true; |
| <------><------><------>rdp->nocb_gp_rdp = rdp_gp; |
| <------><------><------>rdp_prev->nocb_next_cb_rdp = rdp; |
| <------><------><------>if (dump_tree) |
| <------><------><------><------>pr_cont(" %d", cpu); |
| <------><------>} |
| <------><------>rdp_prev = rdp; |
| <------>} |
| <------>if (gotnocbs && dump_tree) |
| <------><------>pr_cont("%s\n", gotnocbscbs ? "" : " (self only)"); |
| } |
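| |
| /* |
|  * Worked example (illustrative): with nr_cpu_ids == 16 and the default |
|  * rcu_nocb_gp_stride of -1, ls becomes 16 / int_sqrt(16) == 4.  If all 16 |
|  * CPUs are in rcu_nocb_mask, CPU 0 leads CPUs 0-3 (nl advances to 4), |
|  * CPU 4 leads CPUs 4-7 (nl advances to 8), and so on, giving four rcuog |
|  * kthreads that each service four CB kthreads. |
|  */ |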
| |
| /* |
|  * Bind the current task to the offloaded (no-CBs) CPUs, if any, and |
|  * WARN if the affinity change fails. |
|  */ |
| void rcu_bind_current_to_nocb(void) |
| { |
| <------>if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask)) |
| <------><------>WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); |
| } |
| EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); |
| |
| /* |
|  * Dump out no-CBs grace-period-kthread state for the specified rcu_data |
|  * structure. |
|  */ |
| static void show_rcu_nocb_gp_state(struct rcu_data *rdp) |
| { |
| <------>struct rcu_node *rnp = rdp->mynode; |
| |
| <------>pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n", |
| <------><------>rdp->cpu, |
| <------><------>"kK"[!!rdp->nocb_gp_kthread], |
| <------><------>"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], |
| <------><------>"dD"[!!rdp->nocb_defer_wakeup], |
| <------><------>"tT"[timer_pending(&rdp->nocb_timer)], |
| <------><------>"bB"[timer_pending(&rdp->nocb_bypass_timer)], |
| <------><------>"sS"[!!rdp->nocb_gp_sleep], |
| <------><------>".W"[swait_active(&rdp->nocb_gp_wq)], |
| <------><------>".W"[swait_active(&rnp->nocb_gp_wq[0])], |
| <------><------>".W"[swait_active(&rnp->nocb_gp_wq[1])], |
| <------><------>".B"[!!rdp->nocb_gp_bypass], |
| <------><------>".G"[!!rdp->nocb_gp_gp], |
| <------><------>(long)rdp->nocb_gp_seq, |
| <------><------>rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops)); |
| } |
| |
| /* Dump out no-CBs callback-kthread state for the specified rcu_data structure. */ |
| static void show_rcu_nocb_state(struct rcu_data *rdp) |
| { |
| <------>struct rcu_segcblist *rsclp = &rdp->cblist; |
| <------>bool waslocked; |
| <------>bool wastimer; |
| <------>bool wassleep; |
| |
| <------>if (rdp->nocb_gp_rdp == rdp) |
| <------><------>show_rcu_nocb_gp_state(rdp); |
| |
| <------>pr_info(" CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n", |
| <------><------>rdp->cpu, rdp->nocb_gp_rdp->cpu, |
| <------><------>"kK"[!!rdp->nocb_cb_kthread], |
| <------><------>"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], |
| <------><------>"cC"[!!atomic_read(&rdp->nocb_lock_contended)], |
| <------><------>"lL"[raw_spin_is_locked(&rdp->nocb_lock)], |
| <------><------>"sS"[!!rdp->nocb_cb_sleep], |
| <------><------>".W"[swait_active(&rdp->nocb_cb_wq)], |
| <------><------>jiffies - rdp->nocb_bypass_first, |
| <------><------>jiffies - rdp->nocb_nobypass_last, |
| <------><------>rdp->nocb_nobypass_count, |
| <------><------>".D"[rcu_segcblist_ready_cbs(rsclp)], |
| <------><------>".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)], |
| <------><------>".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)], |
| <------><------>".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)], |
| <------><------>".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], |
| <------><------>rcu_segcblist_n_cbs(&rdp->cblist)); |
| |
| <------>/* GP-leader CPUs legitimately have GP state, so check only CB-only CPUs. */ |
| <------>if (rdp->nocb_gp_rdp == rdp) |
| <------><------>return; |
| |
| <------>waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock); |
| <------>wastimer = timer_pending(&rdp->nocb_bypass_timer); |
| <------>wassleep = swait_active(&rdp->nocb_gp_wq); |
| <------>if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep) |
| <------><------>return; |
| |
| <------>pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n", |
| <------><------>"lL"[waslocked], |
| <------><------>"dD"[!!rdp->nocb_defer_wakeup], |
| <------><------>"tT"[wastimer], |
| <------><------>"sS"[!!rdp->nocb_gp_sleep], |
| <------><------>".W"[wassleep]); |
| } |
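| |
| /* |
|  * Reading the dumps above: each "xX"[cond] expression prints the second |
|  * (upper-case) character when the condition is true and the first |
|  * (lower-case or '.') character when it is false.  For example, |
|  * "kK"[!!rdp->nocb_cb_kthread] prints 'K' once the CB kthread exists, and |
|  * ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)] prints 'B' only while the |
|  * bypass list is non-empty. |
|  */ |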
| |
| #else /* #ifdef CONFIG_RCU_NOCB_CPU */ |
| |
| /* No ->nocb_lock to acquire. */ |
| static void rcu_nocb_lock(struct rcu_data *rdp) |
| { |
| } |
| |
| /* No ->nocb_lock to release. */ |
| static void rcu_nocb_unlock(struct rcu_data *rdp) |
| { |
| } |
| |
| /* No ->nocb_lock to release, but restore the interrupt state. */ |
| static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, |
| <------><------><------><------> unsigned long flags) |
| { |
| <------>local_irq_restore(flags); |
| } |
| |
| /* Lockdep check that ->cblist may be safely accessed. */ |
| static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) |
| { |
| <------>lockdep_assert_irqs_disabled(); |
| } |
| |
| static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) |
| { |
| } |
| |
| static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) |
| { |
| <------>return NULL; |
| } |
| |
| static void rcu_init_one_nocb(struct rcu_node *rnp) |
| { |
| } |
| |
| static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, |
| <------><------><------><------> unsigned long j) |
| { |
| <------>return true; |
| } |
| |
| static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, |
| <------><------><------><------>bool *was_alldone, unsigned long flags) |
| { |
| <------>return false; |
| } |
| |
| static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, |
| <------><------><------><------> unsigned long flags) |
| { |
| <------>WARN_ON_ONCE(1); |
| } |
| |
| static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) |
| { |
| } |
| |
| static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) |
| { |
| <------>return false; |
| } |
| |
| static void do_nocb_deferred_wakeup(struct rcu_data *rdp) |
| { |
| } |
| |
| static void rcu_spawn_cpu_nocb_kthread(int cpu) |
| { |
| } |
| |
| static void __init rcu_spawn_nocb_kthreads(void) |
| { |
| } |
| |
| static void show_rcu_nocb_state(struct rcu_data *rdp) |
| { |
| } |
| |
| #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ |
| |
| /* |
|  * Is this a NO_HZ_FULL CPU that should leave quiescent-state forcing to |
|  * the grace-period kthread?  The idea is to avoid disturbing such CPUs |
|  * unless the current grace period has already extended for roughly a |
|  * second (HZ jiffies). |
|  * |
|  * This relies on all NO_HZ_FULL CPUs also being no-CBs CPUs. |
|  */ |
| static bool rcu_nohz_full_cpu(void) |
| { |
| #ifdef CONFIG_NO_HZ_FULL |
| <------>if (tick_nohz_full_cpu(smp_processor_id()) && |
| <------> (!rcu_gp_in_progress() || |
| <------> time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ))) |
| <------><------>return true; |
| #endif |
| <------>return false; |
| } |
| |
| /* |
|  * Bind the RCU grace-period kthreads to the housekeeping CPUs. |
|  */ |
| static void rcu_bind_gp_kthread(void) |
| { |
| <------>if (!tick_nohz_full_enabled()) |
| <------><------>return; |
| <------>housekeeping_affine(current, HK_FLAG_RCU); |
| } |
| |
| /* Record the current task on dyntick-idle entry, for Tasks RCU. */ |
| static __always_inline void rcu_dynticks_task_enter(void) |
| { |
| #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) |
| <------>WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); |
| #endif |
| } |
| |
| /* Record no current task on dyntick-idle exit, for Tasks RCU. */ |
| static __always_inline void rcu_dynticks_task_exit(void) |
| { |
| #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) |
| <------>WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); |
| #endif |
| } |
| |
| /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */ |
| static __always_inline void rcu_dynticks_task_trace_enter(void) |
| { |
| #ifdef CONFIG_TASKS_TRACE_RCU |
| <------>if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
| <------><------>current->trc_reader_special.b.need_mb = true; |
| #endif |
| } |
| |
| /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */ |
| static __always_inline void rcu_dynticks_task_trace_exit(void) |
| { |
| #ifdef CONFIG_TASKS_TRACE_RCU |
| <------>if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
| <------><------>current->trc_reader_special.b.need_mb = false; |
| #endif |
| } |
| |