Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

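kernel/rcu/rcuscale.c: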
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define VERBOSE_SCALEOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s); } while (0)
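
/*
 * With the default scale_type of "rcu", SCALEOUT_STRING("Test complete")
 * thus emits "rcu-scale: Test complete", and the ERRSTRING variant marks
 * the message with "!!!" instead: "rcu-scale:!!! out of memory".
 */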

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
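
/*
 * For example (hypothetical command lines), a built-in update-only run
 * confined to four CPUs might boot with:
 *
 *	nr_cpus=4 rcuscale.nreaders=0
 *
 * while a modular run using expedited grace periods might use:
 *
 *	modprobe rcuscale gp_exp=1 nwriters=8
 */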

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif
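
/*
 * Thus a built-in rcuscale defaults to shutting the system down when the
 * test completes, while a modular build defaults to leaving the system up.
 */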

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100
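
/*
 * Each writer records between MIN_MEAS and MAX_MEAS grace-period
 * durations: reaching MIN_MEAS measurements marks a writer as done,
 * and MAX_MEAS bounds the size of its preallocated writer_durations[]
 * array.
 */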

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};
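
/*
 * rcu_scale_init() selects one of the instances below (rcu_ops, srcu_ops,
 * srcud_ops, or tasks_ops) by matching the scale_type module parameter
 * against each instance's ->name field.
 */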

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
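
/*
 * Tree RCU and SRCU embed low-order state bits in their grace-period
 * sequence counters, which rcu_seq_diff() masks off when computing the
 * number of elapsed grace periods; the plain subtraction above is the
 * fallback for counters that advance by one per grace period.
 */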

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					scale_type, SCALE_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
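
/*
 * For example (illustrative arithmetic), with eight CPUs online:
 * n = 4 yields 4, n = -1 yields 8, and n = -2 yields 7 kthreads.
 */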

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs, run it
 * for the given number of iterations, and measure the total time and number
 * of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};
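
/*
 * On a typical 64-bit build, struct kfree_obj is 8 bytes of payload plus
 * a 16-byte rcu_head, so the base allocation is 24 bytes; kfree_mult
 * scales that size (kfree_mult * sizeof(struct kfree_obj)).
 */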

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
		       (unsigned long long)(end_time - start_time), kfree_loops,
		       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
		       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down the system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq,
		   atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_scale_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
			       GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);
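
/*
 * One common way to drive this module is the in-tree rcutorture
 * scripting, e.g. (illustrative invocation; exact flags depend on the
 * scripting version in use):
 *
 *	tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale
 */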