Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt
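/*
 * Note (editorial, an assumption about intent): the identity pr_fmt()
 * leaves printk() format strings untouched, rather than adding a
 * KBUILD_MODNAME-style prefix; rcutorture builds its own tags (e.g.
 * via TORTURE_FLAG, used by srcu_torture_stats() below) into the
 * messages themselves.
 */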

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
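/*
 * Layout sketch: the RCUTORTURE_RDR_* flags occupy the low
 * RCUTORTURE_RDR_SHIFT bits (selected by RCUTORTURE_RDR_MASK), while
 * the SRCU read-side index from srcu_read_lock() is shifted into the
 * bits above them, so a single int can carry both the extension state
 * and the SRCU index.
 */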

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
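/*
 * All of the above are ordinary module parameters, so they can be set
 * at module load time or on the kernel command line.  For example
 * (the parameter values here are illustrative only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stutter=5
 *
 * or, with rcutorture built in, rcutorture.torture_type=srcu on the
 * boot command line.
 */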

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
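/*
 * Pipe sketch: once a writer removes an rcu_torture element from the
 * readers' view, rtort_pipe_count tracks how many grace periods have
 * since elapsed, and the element goes back on the freelist only after
 * RCU_TORTURE_PIPE_LEN of them.  A reader that still observes a large
 * pipe count has therefore witnessed a too-short grace period.
 */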

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};
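/*
 * The array above must stay index-aligned with the RTWS_* values,
 * because rcu_torture_writer_state_getname() below indexes it
 * directly with the current writer state.
 */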

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};
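/*
 * Dispatch sketch: the torture kthreads never call a flavor's
 * primitives directly, but always go through cur_ops.  A reader, for
 * example, brackets its critical section like so:
 *
 *	idx = cur_ops->readlock();
 *	// ... read-side critical section, cur_ops->read_delay() ...
 *	cur_ops->readunlock(idx);
 *
 * which is what lets one test loop drive rcu, srcu, the tasks
 * flavors, and the deliberately broken variants defined below.
 */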

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace
 * period.  Returns true once the element has traversed the full pipe
 * and may be freed.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
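/*
 * rcu_ops above is the baseline: every hook maps straight onto the
 * vanilla RCU API, and its "rcu" name matches the default value of
 * the torture_type module parameter.
 */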

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
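/*
 * Indirection note: every srcu_torture_*() helper below goes through
 * the srcu_ctlp pointer.  By default it targets the statically
 * allocated srcu_ctl, but srcu_torture_init() repoints it at the
 * dynamically initialized srcu_ctld, so the same helpers also serve
 * the "srcud" flavor.
 */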

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}
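/*
 * synchronize_rcu_mult() waits for the listed grace periods (here
 * RCU-tasks and vanilla RCU) concurrently rather than back to back,
 * so the helper above exercises both flavors in one "expedited" sync.
 */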

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
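/*
 * Why the affinity walk above constitutes a grace period: with
 * CONFIG_PREEMPT=n, the preempt_disable()-based readers below cannot
 * be preempted, so migrating this task onto each online CPU in turn
 * forces a context switch on every CPU, and each such switch is a
 * quiescent state for this trivial flavor.
 */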

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	.readlock	= tasks_tracing_torture_read_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	.readunlock	= tasks_tracing_torture_read_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	.get_gp_seq	= rcu_no_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	.sync		= synchronize_rcu_tasks_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	.exp_sync	= synchronize_rcu_tasks_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	.call		= call_rcu_tasks_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	.cb_barrier	= rcu_barrier_tasks_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	.fqs		= NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	.stats		= NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	.irq_capable	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	.slow_gps	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	.name		= "tasks-tracing"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (!cur_ops->gp_diff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		return new - old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return cur_ops->gp_diff(new, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static bool __maybe_unused torturing_tasks(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  * RCU torture priority-boost testing.  Runs one real-time thread per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * CPU for moderate bursts, repeatedly registering RCU callbacks and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * spinning waiting for them to be invoked.  If a given callback takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  * too long to be invoked, we assume that priority inversion has occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) struct rcu_boost_inflight {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	int inflight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static void rcu_torture_boost_cb(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct rcu_boost_inflight *rbip =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		container_of(head, struct rcu_boost_inflight, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	/* Ensure RCU-core accesses precede clearing ->inflight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	smp_store_release(&rbip->inflight, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) }
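
/*
 * The smp_store_release() above pairs with the smp_load_acquire()s of
 * ->inflight in rcu_torture_boost() below, so the callback's RCU-core
 * accesses are ordered before the boost kthread sees the callback as
 * no longer in flight and posts the next one.
 */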
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) static int old_rt_runtime = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static void rcu_torture_disable_rt_throttle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * Disable RT throttling so that rcutorture's boost threads don't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * throttled. This is only possible if rcutorture is built in; otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * the user should do this manually by setting the sched_rt_period_us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 * and sched_rt_runtime_us sysctls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	old_rt_runtime = sysctl_sched_rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	sysctl_sched_rt_runtime = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static void rcu_torture_enable_rt_throttle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	sysctl_sched_rt_runtime = old_rt_runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	old_rt_runtime = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) }
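
/*
 * For the modular case noted above, a manual equivalent is (an
 * illustrative sketch, to be run as root before starting the test):
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * with the saved value (typically 950000) written back afterwards.
 */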
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (end - start > test_boost_duration * HZ - HZ / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		n_rcu_torture_boost_failure++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		return true; /* failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	return false; /* passed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) }
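
/*
 * Worked example for the check above, assuming HZ=1000 and the default
 * test_boost_duration of 4 seconds: a callback must be invoked within
 * 4000 - 500 = 3500 jiffies of being posted, so the RT spinner may
 * delay it by at most half a second less than the full boost interval
 * before a priority-inversion failure is recorded.
 */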
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) static int rcu_torture_boost(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	unsigned long call_rcu_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	unsigned long endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	unsigned long oldstarttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct rcu_boost_inflight rbi = { .inflight = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	/* Set real-time priority. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	sched_set_fifo_low(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	init_rcu_head_on_stack(&rbi.rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	/* Each pass through the following loop does one boost-test cycle. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		/* Track whether the test has already failed in this interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		bool failed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		/* Increment n_rcu_torture_boosts once per boost-test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			if (mutex_trylock(&boost_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				n_rcu_torture_boosts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				mutex_unlock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			goto checkwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		/* Wait for the next test interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		oldstarttime = boost_starttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		while (time_before(jiffies, oldstarttime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			schedule_timeout_interruptible(oldstarttime - jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			stutter_wait("rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			if (torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 				goto checkwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		/* Do one boost-test interval. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		endtime = oldstarttime + test_boost_duration * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		call_rcu_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		while (time_before(jiffies, endtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			/* If we don't have a callback in flight, post one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			if (!smp_load_acquire(&rbi.inflight)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				/* RCU core before ->inflight = 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				smp_store_release(&rbi.inflight, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 				/* Check if the boost test failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 				failed = failed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 					 rcu_torture_boost_failed(call_rcu_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 								 jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				call_rcu_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			stutter_wait("rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			if (torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				goto checkwait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		 * If boosting never happened, then ->inflight will still be 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		 * in which case the failure check in the above loop never ran,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		 * so run one final check here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		if (!failed && smp_load_acquire(&rbi.inflight))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			rcu_torture_boost_failed(call_rcu_time, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		 * Set the start time of the next test interval.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		 * Yes, this is vulnerable to long delays, but such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		 * delays simply cause a false negative for the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		 * interval.  Besides, we are running at RT priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		 * so delays should be relatively rare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		while (oldstarttime == boost_starttime &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		       !kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			if (mutex_trylock(&boost_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				boost_starttime = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 						  test_boost_interval * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				mutex_unlock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		/* Go do the stutter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) checkwait:	stutter_wait("rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/* Clean up and exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		torture_shutdown_absorb("rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	destroy_rcu_head_on_stack(&rbi.rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	torture_kthread_stopping("rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * RCU torture force-quiescent-state kthread.  Repeatedly induces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * bursts of calls to force_quiescent_state(), increasing the probability
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * of occurrence of some important types of race conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) rcu_torture_fqs(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	unsigned long fqs_resume_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	int fqs_burst_remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		fqs_resume_time = jiffies + fqs_stutter * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		while (time_before(jiffies, fqs_resume_time) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		       !kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			schedule_timeout_interruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		fqs_burst_remaining = fqs_duration;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		while (fqs_burst_remaining > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		       !kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			cur_ops->fqs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			udelay(fqs_holdoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			fqs_burst_remaining -= fqs_holdoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		stutter_wait("rcu_torture_fqs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	torture_kthread_stopping("rcu_torture_fqs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
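
/*
 * Units in the loop above: fqs_burst_remaining is decremented by
 * fqs_holdoff after each cur_ops->fqs() call, so fqs_duration is
 * consumed in microseconds of udelay() holdoff per burst, while
 * fqs_stutter spaces the bursts apart in seconds.
 */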
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * RCU torture writer kthread.  Repeatedly substitutes a new structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * for that pointed to by rcu_torture_current, freeing the old structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  * after a series of grace periods (the "pipeline").
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) rcu_torture_writer(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	int expediting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	unsigned long gp_snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	bool gp_sync1 = gp_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	struct rcu_torture *rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	struct rcu_torture *old_rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	static DEFINE_TORTURE_RANDOM(rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			   RTWS_COND_GET, RTWS_SYNC };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	int nsynctypes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (!can_expedite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			 " GP expediting controlled from boot/sysfs for %s.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			 torture_type, cur_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/* Initialize synctype[] array.  If none set, take default. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		synctype[nsynctypes++] = RTWS_COND_GET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		pr_info("%s: Testing conditional GPs.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		pr_alert("%s: gp_cond without primitives.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (gp_exp1 && cur_ops->exp_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		pr_info("%s: Testing expedited GPs.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	} else if (gp_exp && !cur_ops->exp_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		pr_alert("%s: gp_exp without primitives.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (gp_normal1 && cur_ops->deferred_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		synctype[nsynctypes++] = RTWS_DEF_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		pr_info("%s: Testing asynchronous GPs.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	} else if (gp_normal && !cur_ops->deferred_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		pr_alert("%s: gp_normal without primitives.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (gp_sync1 && cur_ops->sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		synctype[nsynctypes++] = RTWS_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		pr_info("%s: Testing normal GPs.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	} else if (gp_sync && !cur_ops->sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		pr_alert("%s: gp_sync without primitives.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
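	/*
	 * At this point synctype[] holds one entry for each grace-period
	 * type that is both requested (or defaulted) and supported by
	 * cur_ops; the update loop below picks among these entries at
	 * random on each pass.
	 */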
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	if (WARN_ONCE(nsynctypes == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		      "rcu_torture_writer: No update-side primitives.\n")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		 * No update-side primitives, so don't try updating.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		 * The resulting test won't be testing much, hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		 * above WARN_ONCE().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		rcu_torture_writer_state = RTWS_STOPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		torture_kthread_stopping("rcu_torture_writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		rp = rcu_torture_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		if (rp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		rp->rtort_pipe_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		rcu_torture_writer_state = RTWS_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		udelay(torture_random(&rand) & 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		rcu_torture_writer_state = RTWS_REPLACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		old_rp = rcu_dereference_check(rcu_torture_current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 					       current == writer_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		rp->rtort_mbtest = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		rcu_assign_pointer(rcu_torture_current, rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		if (old_rp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			i = old_rp->rtort_pipe_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			if (i > RCU_TORTURE_PIPE_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				i = RCU_TORTURE_PIPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			atomic_inc(&rcu_torture_wcount[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			WRITE_ONCE(old_rp->rtort_pipe_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				   old_rp->rtort_pipe_count + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			switch (synctype[torture_random(&rand) % nsynctypes]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			case RTWS_DEF_FREE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				rcu_torture_writer_state = RTWS_DEF_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				cur_ops->deferred_free(old_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			case RTWS_EXP_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				rcu_torture_writer_state = RTWS_EXP_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				cur_ops->exp_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 				rcu_torture_pipe_update(old_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			case RTWS_COND_GET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 				rcu_torture_writer_state = RTWS_COND_GET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 				gp_snap = cur_ops->get_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 				i = torture_random(&rand) % 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 				if (i != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 					schedule_timeout_interruptible(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				udelay(torture_random(&rand) % 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 				rcu_torture_writer_state = RTWS_COND_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 				cur_ops->cond_sync(gp_snap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				rcu_torture_pipe_update(old_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			case RTWS_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 				rcu_torture_writer_state = RTWS_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 				cur_ops->sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 				rcu_torture_pipe_update(old_rp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		WRITE_ONCE(rcu_torture_current_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			   rcu_torture_current_version + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
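		/*
		 * Decoding the test below: while expediting == 0, the
		 * (!!expediting - 1) term is all ones, so the branch is
		 * taken only when the low byte of the random value is
		 * zero (about once in 256 passes).  Once a cycle starts,
		 * that term is zero and the branch is taken every pass,
		 * stepping through four nested rcu_expedite_gp() calls
		 * and then the four matching rcu_unexpedite_gp() calls
		 * back down to zero nesting.
		 */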
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		if (can_expedite &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			if (expediting >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				rcu_expedite_gp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				rcu_unexpedite_gp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			if (++expediting > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				expediting = -expediting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		} else if (!can_expedite) { /* Disabled during boot, recheck. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			can_expedite = !rcu_gp_is_expedited() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				       !rcu_gp_is_normal();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		rcu_torture_writer_state = RTWS_STUTTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (stutter_wait("rcu_torture_writer") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		    !cur_ops->slow_gps &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		    !torture_must_stop() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		    rcu_inkernel_boot_has_ended())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				if (list_empty(&rcu_tortures[i].rtort_free) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				    rcu_access_pointer(rcu_torture_current) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 				    &rcu_tortures[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 					rcu_ftrace_dump(DUMP_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	rcu_torture_current = NULL;  // Let stats task know that we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	/* Reset expediting back to unexpedited. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (expediting > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		expediting = -expediting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	while (can_expedite && expediting++ < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		rcu_unexpedite_gp();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (!can_expedite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			 " Dynamic grace-period expediting was disabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			 torture_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	rcu_torture_writer_state = RTWS_STOPPING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	torture_kthread_stopping("rcu_torture_writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)  * delay between calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) rcu_torture_fakewriter(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	DEFINE_TORTURE_RANDOM(rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		udelay(torture_random(&rand) & 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		if (cur_ops->cb_barrier != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			cur_ops->cb_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		} else if (gp_normal == gp_exp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			if (cur_ops->sync && torture_random(&rand) & 0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 				cur_ops->sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			else if (cur_ops->exp_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 				cur_ops->exp_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		} else if (gp_normal && cur_ops->sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			cur_ops->sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		} else if (cur_ops->exp_sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			cur_ops->exp_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		stutter_wait("rcu_torture_fakewriter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	torture_kthread_stopping("rcu_torture_fakewriter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
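
/*
 * A note on the branch structure above: roughly one pass in
 * nfakewriters * 8 exercises cb_barrier() when one is provided.  When
 * gp_normal and gp_exp are both unset (or both set), the fake writer
 * picks pseudo-randomly between normal and expedited grace periods;
 * otherwise it honors whichever of the two module parameters was
 * specified, falling back to the primitive that cur_ops supports.
 */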
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static void rcu_torture_timer_cb(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	kfree(rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * Do one extension of an RCU read-side critical section using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  * current reader state in readstate (set to zero for initial entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  * to extended critical section), set the new state as specified by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  * newstate (set to zero for final exit from extended critical section),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  * and random-number-generator state in trsp.  If this is neither the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  * beginning nor the end of the critical section and if there was actually a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  * change, do a ->read_delay().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static void rcutorture_one_extend(int *readstate, int newstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 				  struct torture_random_state *trsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 				  struct rt_read_seg *rtrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	int idxnew = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	int idxold = *readstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	int statesnew = ~*readstate & newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	int statesold = *readstate & ~newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	WARN_ON_ONCE(idxold < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	rtrsp->rt_readstate = newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	/* First, put new protection in place to avoid critical-section gap. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (statesnew & RCUTORTURE_RDR_BH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (statesnew & RCUTORTURE_RDR_RBH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		rcu_read_lock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (statesnew & RCUTORTURE_RDR_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (statesnew & RCUTORTURE_RDR_SCHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (statesnew & RCUTORTURE_RDR_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	 * Next, remove old protection, in decreasing order of strength
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	 * to avoid unlock paths that aren't safe in the stronger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	 * context. Namely: BH can not be enabled with disabled interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	 * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	if (statesold & RCUTORTURE_RDR_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (statesold & RCUTORTURE_RDR_PREEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (statesold & RCUTORTURE_RDR_SCHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (statesold & RCUTORTURE_RDR_BH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (statesold & RCUTORTURE_RDR_RBH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		rcu_read_unlock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (statesold & RCUTORTURE_RDR_RCU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		if (lockit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			raw_spin_lock_irqsave(&current->pi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		if (lockit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	/* Delay if neither beginning nor end and there was a change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if ((statesnew || statesold) && *readstate && newstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		cur_ops->read_delay(trsp, rtrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	/* Update the reader state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (idxnew == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	WARN_ON_ONCE(idxnew < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	*readstate = idxnew | newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
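
/*
 * A sketch of the readstate encoding used above: the low-order bits
 * are the RCUTORTURE_RDR_* protection flags, and the bits at and above
 * RCUTORTURE_RDR_SHIFT hold the index returned by cur_ops->readlock(),
 * which SRCU-like flavors need handed back to their readunlock().
 */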
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* Return the biggest extendables mask given current RCU and boot parameters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static int rcutorture_extend_mask_max(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	mask = mask | RCUTORTURE_RDR_RCU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Return a random protection state mask, but with at least one bit set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	int mask = rcutorture_extend_mask_max();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	unsigned long randmask1 = torture_random(trsp) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	unsigned long randmask2 = randmask1 >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (!(randmask1 & 0x7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		mask = mask & randmask2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	 * Can't enable bh with irqs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	if (mask & RCUTORTURE_RDR_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		mask |= oldmask & bhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	 * Ideally these sequences would be detected in debug builds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	 * (regardless of RT), but until then don't stop testing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 * them on non-RT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		/* Can't modify BH in atomic context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		if (oldmask & preempts_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			mask &= ~bhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		if ((oldmask | mask) & preempts_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			mask |= oldmask & bhs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	return mask ?: RCUTORTURE_RDR_RCU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
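
/*
 * Probability sketch for the function above: randmask1 & 0x7 is zero
 * about one time in eight, in which case a random subset of all
 * permitted protection bits is kept; the other seven times in eight,
 * at most a single bit survives.  The final "mask ?: RCUTORTURE_RDR_RCU"
 * guarantees at least the flavor's own read-side primitive.
 */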
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * Do a randomly selected number of extensions of an existing RCU read-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  * critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static struct rt_read_seg *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		       struct rt_read_seg *rtrsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	int mask = rcutorture_extend_mask_max();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (!((mask - 1) & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		return rtrsp;  /* Current RCU reader not extendable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	/* Bias towards larger numbers of loops. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	i = (torture_random(trsp) >> 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		mask = rcutorture_extend_mask(*readstate, trsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	return &rtrsp[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  * Do one read-side critical section, returning false if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  * no data to read.  Can be invoked both from process context and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * from a timer handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static bool rcu_torture_one_read(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	unsigned long started;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	unsigned long completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	int newstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct rcu_torture *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	int pipe_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	int readstate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	struct rt_read_seg *rtrsp = &rtseg[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	struct rt_read_seg *rtrsp1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	unsigned long long ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	WARN_ON_ONCE(!rcu_is_watching());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	newstate = rcutorture_extend_mask(readstate, trsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	started = cur_ops->get_gp_seq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	ts = rcu_trace_clock_local();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	p = rcu_dereference_check(rcu_torture_current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 				  rcu_read_lock_bh_held() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 				  rcu_read_lock_sched_held() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				  srcu_read_lock_held(srcu_ctlp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 				  rcu_read_lock_trace_held() ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 				  torturing_tasks());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (p == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		/* Wait for rcu_torture_writer to get underway */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (p->rtort_mbtest == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		atomic_inc(&n_rcu_torture_mberror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	pipe_count = READ_ONCE(p->rtort_pipe_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		/* Should not happen, but... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		pipe_count = RCU_TORTURE_PIPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	completed = cur_ops->get_gp_seq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (pipe_count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 					  ts, started, completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		rcu_ftrace_dump(DUMP_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	__this_cpu_inc(rcu_torture_count[pipe_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	completed = rcutorture_seq_diff(completed, started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (completed > RCU_TORTURE_PIPE_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		/* Should not happen, but... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		completed = RCU_TORTURE_PIPE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	__this_cpu_inc(rcu_torture_batch[completed]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	// This next splat is expected behavior if leakpointer is set, especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* If error or close call, record the sequence of reader protections. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			err_segs[i++] = *rtrsp1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		rt_read_nsegs = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  * incrementing the corresponding element of the pipeline array.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * counter in the element should never be greater than 1; otherwise, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * RCU implementation is broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static void rcu_torture_timer(struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	atomic_long_inc(&n_rcu_torture_timers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	/* Test call_rcu() invocation from interrupt handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (cur_ops->call) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		if (rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			cur_ops->call(rhp, rcu_torture_timer_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)  * incrementing the corresponding element of the pipeline array.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * counter in the element should never be greater than 1; otherwise, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * RCU implementation is broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) rcu_torture_reader(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	unsigned long lastsleep = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	long myid = (long)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	int mynumonline = myid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	DEFINE_TORTURE_RANDOM(rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct timer_list t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (irqreader && cur_ops->irq_capable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		if (irqreader && cur_ops->irq_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			if (!timer_pending(&t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 				mod_timer(&t, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			schedule_timeout_interruptible(HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			schedule_timeout_interruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 			lastsleep = jiffies + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		}
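		/*
		 * Reader N waits here until at least N CPUs are online, so
		 * that CPU-hotplug (onoff) testing does not pile all of the
		 * readers onto a handful of remaining CPUs.
		 */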
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		while (num_online_cpus() < mynumonline && !torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			schedule_timeout_interruptible(HZ / 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		stutter_wait("rcu_torture_reader");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (irqreader && cur_ops->irq_capable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		del_timer_sync(&t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		destroy_timer_on_stack(&t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	torture_kthread_stopping("rcu_torture_reader");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  * Print torture statistics.  Caller must ensure that there is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  * one call to this function at a given time!!!  This is normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)  * accomplished by relying on the module system to only have one copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  * of the module loaded, and then by giving the rcu_torture_stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  * kthread full control (or the init/cleanup functions when rcu_torture_stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)  * thread is not running).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) rcu_torture_stats_print(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct rcu_torture *rtcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	static unsigned long rtcv_snap = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	static bool splatted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	struct task_struct *wtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	}
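	/*
	 * Find the highest-numbered non-empty pipeline bucket.  Counts in
	 * buckets beyond 1 mean that a reader saw a stale structure after
	 * the updater believed a grace period had completed; this is
	 * flagged as a too-short grace period below.
	 */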
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		if (pipesummary[i] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	rtcp = rcu_access_pointer(rcu_torture_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		rtcp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		rcu_torture_current_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		list_empty(&rcu_torture_freelist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		atomic_read(&n_rcu_torture_alloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		atomic_read(&n_rcu_torture_alloc_fail),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		atomic_read(&n_rcu_torture_free));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		atomic_read(&n_rcu_torture_mberror),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		n_rcu_torture_barrier_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		n_rcu_torture_boost_ktrerror,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		n_rcu_torture_boost_rterror);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		n_rcu_torture_boost_failure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		n_rcu_torture_boosts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		atomic_long_read(&n_rcu_torture_timers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	torture_onoff_stats();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	pr_cont("barrier: %ld/%ld:%ld ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		data_race(n_barrier_successes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		data_race(n_barrier_attempts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		data_race(n_rcu_torture_barrier_error));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	pr_cont("read-exits: %ld\n", data_race(n_read_exits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (atomic_read(&n_rcu_torture_mberror) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	    i > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		pr_cont("%s", "!!! ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		atomic_inc(&n_rcu_torture_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		WARN_ON_ONCE(i > 1); // Too-short grace period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	pr_cont("Reader Pipe: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		pr_cont(" %ld", pipesummary[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	pr_cont("Reader Batch: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		pr_cont(" %ld", batchsummary[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	pr_cont("Free-Block Circulation: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	if (cur_ops->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		cur_ops->stats();
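	/*
	 * If the grace-period counter has not advanced since the previous
	 * stats pass while a current structure remains posted, the writer
	 * may be stalled, so dump writer, GP-kthread, and ftrace state.
	 */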
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (rtcv_snap == rcu_torture_current_version &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	    rcu_access_pointer(rcu_torture_current) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	    !rcu_stall_is_suppressed()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		int __maybe_unused flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		unsigned long __maybe_unused gp_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		rcutorture_get_gp_data(cur_ops->ttype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 				       &flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 					&flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		wtp = READ_ONCE(writer_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			 rcu_torture_writer_state_getname(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			 rcu_torture_writer_state, gp_seq, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			 wtp == NULL ? ~0UL : wtp->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		if (!splatted && wtp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			sched_show_task(wtp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			splatted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		show_rcu_gp_kthreads();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		rcu_ftrace_dump(DUMP_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	rtcv_snap = rcu_torture_current_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * Periodically prints torture statistics, if periodic statistics printing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  * was specified via the stat_interval module parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) rcu_torture_stats(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		schedule_timeout_interruptible(stat_interval * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		rcu_torture_stats_print();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		torture_shutdown_absorb("rcu_torture_stats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	torture_kthread_stopping("rcu_torture_stats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		 "--- %s: nreaders=%d nfakewriters=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		 "shuffle_interval=%d stutter=%d irqreader=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 "test_boost=%d/%d test_boost_interval=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		 "test_boost_duration=%d shutdown_secs=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		 "stall_cpu_block=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		 "n_barrier_cbs=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		 "onoff_interval=%d onoff_holdoff=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		 "read_exit_delay=%d read_exit_burst=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		 torture_type, tag, nrealreaders, nfakewriters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		 test_boost, cur_ops->can_boost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		 test_boost_interval, test_boost_duration, shutdown_secs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		 stall_cpu_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		 n_barrier_cbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		 onoff_interval, onoff_holdoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		 read_exit_delay, read_exit_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
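/*
 * CPU-hotplug teardown for the per-CPU boost kthread: detach the task
 * while holding boost_mutex, then stop it with the mutex dropped to
 * avoid deadlocking against the kthread itself.
 */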
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static int rcutorture_booster_cleanup(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	struct task_struct *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (boost_tasks[cpu] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	mutex_lock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	t = boost_tasks[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	boost_tasks[cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	rcu_torture_enable_rt_throttle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	mutex_unlock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	/* This must be outside of the mutex, otherwise deadlock! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	torture_stop_kthread(rcu_torture_boost, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static int rcutorture_booster_init(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	if (boost_tasks[cpu] != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		return 0;  /* Already created, nothing more to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	/* Don't allow time recalculation while creating a new task. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	mutex_lock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	rcu_torture_disable_rt_throttle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 						  cpu_to_node(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 						  "rcu_torture_boost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (IS_ERR(boost_tasks[cpu])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		retval = PTR_ERR(boost_tasks[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		n_rcu_torture_boost_ktrerror++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		boost_tasks[cpu] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		mutex_unlock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	kthread_bind(boost_tasks[cpu], cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	wake_up_process(boost_tasks[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	mutex_unlock(&boost_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * induces a CPU stall for the time specified by stall_cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) static int rcu_torture_stall(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	unsigned long stop_at;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	if (stall_cpu_holdoff > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			schedule_timeout_uninterruptible(HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	if (!kthread_should_stop() && stall_cpu > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		stop_at = ktime_get_seconds() + stall_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		/* RCU CPU stall is expected behavior in following code. */
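		/*
		 * Three stall flavors: stall_cpu_irqsoff spins with
		 * interrupts disabled, stall_cpu_block repeatedly sleeps
		 * inside the read-side critical section, and the default
		 * spins with preemption disabled.
		 */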
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		idx = cur_ops->readlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		if (stall_cpu_irqsoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 			local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		else if (!stall_cpu_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		pr_alert("rcu_torture_stall start on CPU %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			 raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 				    stop_at))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 			if (stall_cpu_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 				schedule_timeout_uninterruptible(HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		if (stall_cpu_irqsoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		else if (!stall_cpu_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		cur_ops->readunlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	pr_alert("rcu_torture_stall end.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	torture_shutdown_absorb("rcu_torture_stall");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	while (!kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		schedule_timeout_interruptible(10 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* Spawn CPU-stall kthread, if stall_cpu specified. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) static int __init rcu_torture_stall_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* State structure for forward-progress self-propagating RCU callback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) struct fwd_cb_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	struct rcu_head rh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	int stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) };
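/* ->stop protocol: 0 = keep reposting, 1 = stop requested, 2 = acknowledged by the callback. */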
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * Forward-progress self-propagating RCU callback function.  Because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  * callbacks run from softirq, this function is an implicit RCU read-side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * critical section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (READ_ONCE(fcsp->stop)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		WRITE_ONCE(fcsp->stop, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* State for continuous-flood RCU callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct rcu_fwd_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	struct rcu_head rh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	struct rcu_fwd_cb *rfc_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	struct rcu_fwd *rfc_rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	int rfc_gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct rcu_launder_hist {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	long n_launders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	unsigned long launder_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct rcu_fwd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	spinlock_t rcu_fwd_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	struct rcu_fwd_cb *rcu_fwd_cb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	long n_launders_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	unsigned long rcu_fwd_startat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	unsigned long rcu_launder_gp_seq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) };
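/*
 * ->rcu_fwd_cb_tail always points at the ->rfc_next pointer of the last
 * element (or at ->rcu_fwd_cb_head when the list is empty), so enqueues
 * under ->rcu_fwd_lock are O(1).
 */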
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) static DEFINE_MUTEX(rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static struct rcu_fwd *rcu_fwds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) static bool rcu_fwd_emergency_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	unsigned long gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	unsigned long gps_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		if (rfp->n_launders_hist[i].n_launders > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		 __func__, jiffies - rfp->rcu_fwd_startat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	gps_old = rfp->rcu_launder_gp_seq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	for (j = 0; j <= i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		gps = rfp->n_launders_hist[j].launder_gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		pr_cont(" %ds/%d: %ld:%ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			j + 1, FWD_CBS_HIST_DIV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			rfp->n_launders_hist[j].n_launders,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			rcutorture_seq_diff(gps, gps_old));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		gps_old = gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /* Callback function for continuous-flood RCU callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	struct rcu_fwd_cb **rfcpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
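	/*
	 * Requeue this callback onto the flood list so that the flood loop
	 * can re-post it, bump its grace-period ("launder") count, and
	 * credit the histogram bucket for the current test interval.
	 */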
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	rfcp->rfc_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	rfcp->rfc_gps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	rfcpp = rfp->rcu_fwd_cb_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	WRITE_ONCE(*rfcpp, rfcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	rfp->n_launders_hist[i].n_launders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) // Give the scheduler a chance, even on nohz_full CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		// Real call_rcu() floods hit userspace, so emulate that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		if (need_resched() || (iter & 0xfff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	// No userspace emulation: CB invocation throttles call_rcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  * Free all callbacks on the rcu_fwd_cb_head list, either because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * test is over or because we hit an OOM event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	unsigned long freed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	struct rcu_fwd_cb *rfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
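	/* Pop entries one at a time so the lock is never held across kfree(). */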
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		rfcp = rfp->rcu_fwd_cb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		if (!rfcp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		if (!rfp->rcu_fwd_cb_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		kfree(rfcp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		freed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		rcu_torture_fwd_prog_cond_resched(freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		if (tick_nohz_full_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			rcu_momentary_dyntick_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /* Carry out need_resched()/cond_resched() forward-progress testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 				    int *tested, int *tested_tries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	unsigned long cver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	unsigned long dur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	struct fwd_cb_state fcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	unsigned long gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	int sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	int sd4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	bool selfpropcb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	unsigned long stopat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	static DEFINE_TORTURE_RANDOM(trs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		init_rcu_head_on_stack(&fcs.rh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		selfpropcb = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	/* Tight loop containing cond_resched(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	cur_ops->sync(); /* Later readers see above write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (selfpropcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		WRITE_ONCE(fcs.stop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	cver = READ_ONCE(rcu_torture_current_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	gps = cur_ops->get_gp_seq();
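	/*
	 * Pick a random duration between 1/fwd_progress_div of the RCU
	 * CPU-stall timeout and just under the full timeout, testing
	 * forward progress right up to the stall threshold.
	 */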
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	sd = cur_ops->stall_dur() + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	dur = sd4 + torture_random(&trs) % (sd - sd4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	stopat = rfp->rcu_fwd_startat + dur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	while (time_before(jiffies, stopat) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	       !shutdown_time_arrived() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		idx = cur_ops->readlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		cur_ops->readunlock(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		if (!fwd_progress_need_resched || need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	(*tested_tries)++;
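	/*
	 * Count this as a genuine test only if the full duration elapsed
	 * with no shutdown, emergency stop, or stop request intervening.
	 */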
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (!time_before(jiffies, stopat) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	    !shutdown_time_arrived() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		(*tested)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		cver = READ_ONCE(rcu_torture_current_version) - cver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		WARN_ON(!cver && gps < 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	if (selfpropcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		WRITE_ONCE(fcs.stop, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		cur_ops->sync(); /* Wait for running CB to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	if (selfpropcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		WARN_ON(READ_ONCE(fcs.stop) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		destroy_rcu_head_on_stack(&fcs.rh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /* Carry out call_rcu() forward-progress testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	unsigned long cver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	unsigned long gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	long n_launders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	long n_launders_cb_snap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	long n_launders_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	long n_max_cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	long n_max_gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	struct rcu_fwd_cb *rfcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	struct rcu_fwd_cb *rfcpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	unsigned long stopat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	unsigned long stoppedat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	if (READ_ONCE(rcu_fwd_emergency_stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		return; /* Get out of the way quickly, no GP wait! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (!cur_ops->call)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		return; /* Can't do call_rcu() fwd prog without ->call. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	/* Loop continuously posting RCU callbacks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	cur_ops->sync(); /* Later readers see above write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	n_launders = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	n_launders_sa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	n_max_cbs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	n_max_gps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		rfp->n_launders_hist[i].n_launders = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	cver = READ_ONCE(rcu_torture_current_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	gps = cur_ops->get_gp_seq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	rfp->rcu_launder_gp_seq_start = gps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
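	/*
	 * Callback-flood loop: re-post the callback at the head of the
	 * flood list if it has a successor, otherwise allocate and post a
	 * fresh one.  Declare victory once MIN_FWD_CBS_LAUNDERED callbacks
	 * have each been invoked at least MIN_FWD_CB_LAUNDERS times within
	 * MAX_FWD_CB_JIFFIES.
	 */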
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	while (time_before(jiffies, stopat) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	       !shutdown_time_arrived() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		rfcpn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		if (rfcp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			rfcpn = READ_ONCE(rfcp->rfc_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		if (rfcpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			rfp->rcu_fwd_cb_head = rfcpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 			n_launders++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 			n_launders_sa++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			if (WARN_ON_ONCE(!rfcp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 				schedule_timeout_interruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 			n_max_cbs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 			n_launders_sa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 			rfcp->rfc_gps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			rfcp->rfc_rfp = rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		if (tick_nohz_full_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			rcu_momentary_dyntick_idle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	stoppedat = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	cver = READ_ONCE(rcu_torture_current_version) - cver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	(void)rcu_torture_fwd_prog_cbfree(rfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	    !shutdown_time_arrived()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 			 __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			 n_launders + n_max_cbs - n_launders_cb_snap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			 n_launders, n_launders_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			 n_max_gps, n_max_cbs, cver, gps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		rcu_torture_fwd_cb_hist(rfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  * OOM notifier, but this only prints diagnostic information for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)  * current forward-progress test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static int rcutorture_oom_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 				 unsigned long notused, void *nfreed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	struct rcu_fwd *rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
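	/* rcu_fwd_mutex keeps rcu_fwds stable for the duration of OOM handling. */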
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	mutex_lock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	rfp = rcu_fwds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	if (!rfp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		mutex_unlock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	     __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	rcu_torture_fwd_cb_hist(rfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
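	/*
	 * Alternate freeing with rcu_barrier() so that callbacks still in
	 * flight when the emergency stop was set are invoked, requeued,
	 * and then freed as well.
	 */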
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	pr_info("%s: Freed %lu RCU callbacks.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	pr_info("%s: Freed %lu RCU callbacks.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	pr_info("%s: Freed %lu RCU callbacks.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	smp_mb(); /* Frees before return to avoid redoing OOM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	pr_info("%s returning after OOM processing.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	mutex_unlock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) static struct notifier_block rcutorture_oom_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	.notifier_call = rcutorture_oom_notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* Carry out grace-period forward-progress testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) static int rcu_torture_fwd_prog(void *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	struct rcu_fwd *rfp = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	int tested = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	int tested_tries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	rcu_bind_current_to_nocb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		WRITE_ONCE(rcu_fwd_emergency_stop, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		    rcu_inkernel_boot_has_ended())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		if (rcu_inkernel_boot_has_ended())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			rcu_torture_fwd_prog_cr(rfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		/* Avoid slow periods, better to test when busy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		stutter_wait("rcu_torture_fwd_prog");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	/* Short runs might not contain a valid forward-progress attempt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	WARN_ON(!tested && tested_tries >= 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	torture_kthread_stopping("rcu_torture_fwd_prog");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /* If forward-progress checking is requested and feasible, spawn the thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) static int __init rcu_torture_fwd_prog_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	struct rcu_fwd *rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (!fwd_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 		return 0; /* Not requested, so don't do it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	    cur_ops == &rcu_busted_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	if (stall_cpu > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 			return -EINVAL; /* In module, can fail back to user. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	if (fwd_progress_holdoff <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		fwd_progress_holdoff = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	if (fwd_progress_div <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		fwd_progress_div = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	if (!rfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	spin_lock_init(&rfp->rcu_fwd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	mutex_lock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	rcu_fwds = rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	mutex_unlock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	register_oom_notifier(&rcutorture_oom_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) static void rcu_torture_fwd_prog_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	struct rcu_fwd *rfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	rfp = rcu_fwds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	mutex_lock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	rcu_fwds = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	mutex_unlock(&rcu_fwd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	unregister_oom_notifier(&rcutorture_oom_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	kfree(rfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
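
/*
 * The rcu_fwd_mutex handoff above matters because the OOM notifier can
 * fire at any time: rcu_fwds is assumed to be sampled only under
 * rcu_fwd_mutex, so once it is NULLed with the mutex held, no notifier
 * invocation can still be referencing *rfp when kfree(rfp) runs.
 */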
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) /* Callback function for RCU barrier testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	atomic_inc(&barrier_cbs_invoked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) /* IPI handler to get callback posted on desired CPU, if online. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static void rcu_torture_barrier1cb(void *rcu_void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	struct rcu_head *rhp = rcu_void;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
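
/*
 * smp_call_function_single() returns nonzero if the target CPU is not
 * online, which is how the caller below detects that the callback must
 * instead be posted from the current CPU.  The wait flag (final
 * argument, 1) blocks until the handler has run, so the on-stack
 * rcu_head is guaranteed to be queued before the caller proceeds.
 */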
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) /* kthread function to register callbacks used to test RCU barriers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) static int rcu_torture_barrier_cbs(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	long myid = (long)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	bool lastphase = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	bool newphase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	init_rcu_head_on_stack(&rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		wait_event(barrier_cbs_wq[myid],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 			   (newphase =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 			    smp_load_acquire(&barrier_phase)) != lastphase ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 			   torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		lastphase = newphase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		if (torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		 * The above smp_load_acquire() ensures barrier_phase load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		 * is ordered before the following ->call().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 					     &rcu, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			// IPI failed, so use direct call from current CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		if (atomic_dec_and_test(&barrier_cbs_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			wake_up(&barrier_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (cur_ops->cb_barrier != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		cur_ops->cb_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	destroy_rcu_head_on_stack(&rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	torture_kthread_stopping("rcu_torture_barrier_cbs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
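
/*
 * Ordering sketch for one barrier round (one posting kthread shown):
 *
 *	rcu_torture_barrier()			rcu_torture_barrier_cbs()
 *	---------------------			-------------------------
 *	atomic_set(&...invoked, 0);
 *	atomic_set(&...count, n);
 *	smp_store_release(&barrier_phase);	smp_load_acquire(&barrier_phase);
 *	wake_up(&barrier_cbs_wq[i]);		cur_ops->call(&rcu, ...);
 *	wait_event(barrier_wq, count == 0);	atomic_dec_and_test(&...count);
 *
 * The store-release/load-acquire pair guarantees that each poster sees
 * the freshly zeroed counters before posting its callback for the new
 * phase.
 */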
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* kthread function to drive and coordinate RCU barrier testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) static int rcu_torture_barrier(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		atomic_set(&barrier_cbs_invoked, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		/* Ensure barrier_phase ordered after prior assignments. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		smp_store_release(&barrier_phase, !barrier_phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		for (i = 0; i < n_barrier_cbs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			wake_up(&barrier_cbs_wq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		wait_event(barrier_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 			   atomic_read(&barrier_cbs_count) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			   torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		if (torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		n_barrier_attempts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 			n_rcu_torture_barrier_error++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			       atomic_read(&barrier_cbs_invoked),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 			       n_barrier_cbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 			WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 			// Wait manually for the remaining callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 				if (WARN_ON(i++ > HZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 					i = INT_MIN; // Go deeply negative: at most one WARN per recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 				schedule_timeout_interruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 				cur_ops->cb_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			} while (atomic_read(&barrier_cbs_invoked) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 				 n_barrier_cbs &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 				 !torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 			smp_mb(); // Can't trust ordering if broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 			if (!torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 				       atomic_read(&barrier_cbs_invoked));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			n_barrier_successes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		schedule_timeout_interruptible(HZ / 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	torture_kthread_stopping("rcu_torture_barrier");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
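
/*
 * Illustrative invocation (parameter values are examples, not
 * defaults):
 *
 *	modprobe rcutorture torture_type=rcu n_barrier_cbs=4
 *
 * This spawns four rcu_torture_barrier_cbs() kthreads plus the
 * rcu_torture_barrier() driver above.  Any round that ends with
 * barrier_cbs_invoked != n_barrier_cbs bumps
 * n_rcu_torture_barrier_error and fails the test.
 */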
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) /* Initialize RCU barrier testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int rcu_torture_barrier_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	if (n_barrier_cbs <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			 " Call or barrier ops missing for %s,\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 			 torture_type, cur_ops->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 		pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 			 " RCU barrier testing omitted from run.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 			 torture_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	atomic_set(&barrier_cbs_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	atomic_set(&barrier_cbs_invoked, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	barrier_cbs_tasks =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	barrier_cbs_wq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	if (!barrier_cbs_tasks || !barrier_cbs_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	for (i = 0; i < n_barrier_cbs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		init_waitqueue_head(&barrier_cbs_wq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 					     (void *)(long)i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 					     barrier_cbs_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) /* Clean up after RCU barrier testing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) static void rcu_torture_barrier_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (barrier_cbs_tasks != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		for (i = 0; i < n_barrier_cbs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			torture_stop_kthread(rcu_torture_barrier_cbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 					     barrier_cbs_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		kfree(barrier_cbs_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		barrier_cbs_tasks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	kfree(barrier_cbs_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	barrier_cbs_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) static bool rcu_torture_can_boost(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	static int boost_warn_once;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	int prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	prio = rcu_get_gp_kthreads_prio();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	if (!prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	if (prio < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		if (boost_warn_once == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		boost_warn_once = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
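
/*
 * As the warning above indicates, boosting can be validated only when
 * the RCU grace-period kthreads themselves run at RT priority of at
 * least 2, for example (illustrative kernel command line):
 *
 *	rcutree.kthread_prio=2 rcutorture.test_boost=2
 *
 * Note that test_boost=2 requests boost testing even for flavors that
 * do not set ->can_boost.
 */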
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) static bool read_exit_child_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) static bool read_exit_child_stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) static wait_queue_head_t read_exit_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) // Child kthread which just does an rcutorture reader and exits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) static int rcu_torture_read_exit_child(void *trsp_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	struct torture_random_state *trsp = trsp_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	// Minimize time between reading and exiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	while (!kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	(void)rcu_torture_one_read(trsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) // Parent kthread which creates and destroys read-exit child kthreads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) static int rcu_torture_read_exit(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	bool errexit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	struct task_struct *tsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	DEFINE_TORTURE_RANDOM(trs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	// Allocate and initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	// Each pass through this loop does one read-exit episode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		if (++count > read_exit_burst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 			rcu_barrier(); // Wait for task_struct free, avoid OOM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			for (i = 0; i < read_exit_delay; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 				schedule_timeout_uninterruptible(HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 				if (READ_ONCE(read_exit_child_stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			if (!READ_ONCE(read_exit_child_stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 			count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		if (READ_ONCE(read_exit_child_stop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		// Spawn child.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		tsp = kthread_run(rcu_torture_read_exit_child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 				  &trs, "%s", "rcu_torture_read_exit_child");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		if (IS_ERR(tsp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 			VERBOSE_TOROUT_ERRSTRING("out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			errexit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 			tsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		kthread_stop(tsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		n_read_exits++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		stutter_wait("rcu_torture_read_exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	// Clean up and exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	smp_mb(); // Store before wakeup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	wake_up(&read_exit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	while (!torture_must_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	torture_kthread_stopping("rcu_torture_read_exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
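
/*
 * Shape of one read-exit episode, per the loop above: spawn and reap
 * read_exit_burst short-lived children back to back, then rcu_barrier()
 * and sleep for read_exit_delay seconds before the next episode.  The
 * kthread_stop() issued immediately after kthread_run() is what
 * compresses each child's lifetime down to roughly one rcutorture read
 * followed by task exit.
 */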
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) static int rcu_torture_read_exit_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (read_exit_burst <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	init_waitqueue_head(&read_exit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	read_exit_child_stop = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 	read_exit_child_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	return torture_create_kthread(rcu_torture_read_exit, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 				      read_exit_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) static void rcu_torture_read_exit_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	if (!read_exit_task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	WRITE_ONCE(read_exit_child_stop, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	smp_mb(); // Above write before wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
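
/*
 * The smp_load_acquire() of read_exit_child_stopped above pairs with
 * the smp_store_release() in rcu_torture_read_exit(), so cleanup cannot
 * observe the "stopped" flag before the parent has finished reaping its
 * final child.
 */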
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static enum cpuhp_state rcutor_hp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) rcu_torture_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	int firsttime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	unsigned long gp_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	if (torture_cleanup_begin()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		if (cur_ops->cb_barrier != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 			cur_ops->cb_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	if (!cur_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		torture_cleanup_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	show_rcu_gp_kthreads();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	rcu_torture_read_exit_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	rcu_torture_barrier_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	rcu_torture_fwd_prog_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	torture_stop_kthread(rcu_torture_stall, stall_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	torture_stop_kthread(rcu_torture_writer, writer_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	if (reader_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		for (i = 0; i < nrealreaders; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 			torture_stop_kthread(rcu_torture_reader,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 					     reader_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		kfree(reader_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	if (fakewriter_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		for (i = 0; i < nfakewriters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 			torture_stop_kthread(rcu_torture_fakewriter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 					     fakewriter_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		kfree(fakewriter_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		fakewriter_tasks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		 cur_ops->name, (long)gp_seq, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	torture_stop_kthread(rcu_torture_stats, stats_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (rcu_torture_can_boost())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		cpuhp_remove_state(rcutor_hp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	 * cleanup operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	if (cur_ops->cb_barrier != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		cur_ops->cb_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	if (cur_ops->cleanup != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 		cur_ops->cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	if (err_segs_recorded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		pr_alert("Failure/close-call rcutorture reader segments:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		if (rt_read_nsegs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 			pr_alert("\t: No segments recorded!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		firsttime = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		for (i = 0; i < rt_read_nsegs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 			if (err_segs[i].rt_delay_jiffies != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 					err_segs[i].rt_delay_jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 				firsttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 			if (err_segs[i].rt_delay_ms != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 				pr_cont("%s%ldms", firsttime ? "" : "+",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 					err_segs[i].rt_delay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				firsttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 			if (err_segs[i].rt_delay_us != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 				pr_cont("%s%ldus", firsttime ? "" : "+",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 					err_segs[i].rt_delay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 				firsttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 			pr_cont("%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 				err_segs[i].rt_preempted ? "preempted" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	else if (torture_onoff_failures())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		rcu_torture_print_module_parms(cur_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 					       "End of test: RCU_HOTPLUG");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	torture_cleanup_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) static void rcu_torture_leak_cb(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) static void rcu_torture_err_cb(struct rcu_head *rhp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	 * This -might- happen due to race conditions, but is unlikely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	 * The scenario that leads to this happening is that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	 * first of the pair of duplicate callbacks is queued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	 * someone else starts a grace period that includes that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	 * callback, then the second of the pair must wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	 * next grace period.  Unlikely, but can happen.  If it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	 * does happen, the debug-objects subsystem won't have splatted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)  * Verify that double-free causes debug-objects to complain, but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)  * cannot be carried out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) static void rcu_test_debug_objects(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	struct rcu_head rh1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	struct rcu_head rh2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	init_rcu_head_on_stack(&rh1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	init_rcu_head_on_stack(&rh2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	preempt_disable(); /* Prevent preemption from interrupting test. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	local_irq_disable(); /* Make it harder to start a new grace period. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	call_rcu(&rh2, rcu_torture_leak_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	/* Wait for them all to get done so we can safely return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	destroy_rcu_head_on_stack(&rh1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	destroy_rcu_head_on_stack(&rh2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
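
/*
 * Reaching this test requires both CONFIG_DEBUG_OBJECTS_RCU_HEAD=y and
 * the object_debug module parameter, for example (illustrative):
 *
 *	modprobe rcutorture object_debug=1
 *
 * The expected result is a debug-objects complaint about the duplicate
 * call_rcu() of rh2, with rcu_torture_err_cb() catching the rare race
 * in which the duplicate is invoked without a splat.
 */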
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static void rcutorture_sync(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	static unsigned long n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (cur_ops->sync && !(++n & 0xfff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		cur_ops->sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
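
/*
 * rcutorture_sync() is handed to torture_onoff_init() below as the
 * inter-hotplug callback.  Because (++n & 0xfff) is zero only once
 * every 4096 calls, at most one cur_ops->sync() grace period is
 * injected per 4096 CPU-hotplug operations instead of one per
 * operation.
 */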
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) rcu_torture_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	int firsterr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	unsigned long gp_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	static struct rcu_torture_ops *torture_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 		&tasks_tracing_ops, &trivial_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	if (!torture_init_begin(torture_type, verbose))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	/* Process args and tell the world that the torturer is on the job. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 		cur_ops = torture_ops[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 		if (strcmp(torture_type, cur_ops->name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	if (i == ARRAY_SIZE(torture_ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 			 torture_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		pr_alert("rcu-torture types:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 			pr_cont(" %s", torture_ops[i]->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		firsterr = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		cur_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		fqs_duration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (cur_ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		cur_ops->init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	if (nreaders >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 		nrealreaders = nreaders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		nrealreaders = num_online_cpus() - 2 - nreaders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		if (nrealreaders <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 			nrealreaders = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	rcu_torture_print_module_parms(cur_ops, "Start of test");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	start_gp_seq = gp_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		 cur_ops->name, (long)gp_seq, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	/* Set up the freelist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	INIT_LIST_HEAD(&rcu_torture_freelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		rcu_tortures[i].rtort_mbtest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		list_add_tail(&rcu_tortures[i].rtort_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			      &rcu_torture_freelist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	/* Initialize the statistics so that each run gets its own numbers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	rcu_torture_current = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	rcu_torture_current_version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	atomic_set(&n_rcu_torture_alloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	atomic_set(&n_rcu_torture_alloc_fail, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	atomic_set(&n_rcu_torture_free, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	atomic_set(&n_rcu_torture_mberror, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	atomic_set(&n_rcu_torture_error, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	n_rcu_torture_barrier_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	n_rcu_torture_boost_ktrerror = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	n_rcu_torture_boost_rterror = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	n_rcu_torture_boost_failure = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	n_rcu_torture_boosts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		atomic_set(&rcu_torture_wcount[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 			per_cpu(rcu_torture_count, cpu)[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	err_segs_recorded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	rt_read_nsegs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	/* Start up the kthreads. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 					  writer_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	if (nfakewriters > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		fakewriter_tasks = kcalloc(nfakewriters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 					   sizeof(fakewriter_tasks[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 					   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		if (fakewriter_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 			VERBOSE_TOROUT_ERRSTRING("out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	for (i = 0; i < nfakewriters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 						  NULL, fakewriter_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 			       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (reader_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		VERBOSE_TOROUT_ERRSTRING("out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	for (i = 0; i < nrealreaders; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 						  reader_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	if (stat_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 						  stats_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	if (test_no_idle_hz && shuffle_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	if (stutter < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		stutter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	if (stutter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		int t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		firsterr = torture_stutter_init(stutter * HZ, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	if (fqs_duration < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		fqs_duration = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	if (fqs_duration) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		/* Create the fqs thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 						  fqs_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	if (test_boost_interval < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		test_boost_interval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	if (test_boost_duration < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		test_boost_duration = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	if (rcu_torture_can_boost()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		boost_starttime = jiffies + test_boost_interval * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 					     rcutorture_booster_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 					     rcutorture_booster_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		if (firsterr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		rcutor_hp = firsterr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 				      rcutorture_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	firsterr = rcu_torture_stall_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	firsterr = rcu_torture_fwd_prog_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	firsterr = rcu_torture_barrier_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	firsterr = rcu_torture_read_exit_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	if (object_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		rcu_test_debug_objects();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	torture_init_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	torture_init_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	rcu_torture_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	return firsterr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) module_init(rcu_torture_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) module_exit(rcu_torture_cleanup);
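
/*
 * Quick-start sketch (illustrative parameter values):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=15
 *	rmmod rcutorture
 *
 * A clean run prints "End of test: SUCCESS" via
 * rcu_torture_print_module_parms(); barrier or reader errors produce
 * "End of test: FAILURE" instead.
 */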