Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

The file below is kernel/scftorture.c (torture test for smp_call_function() and friends); git blame attributes every line to commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

#define SCFTORTOUT(s, x...) \
	pr_alert(SCFTORT_FLAG s, ## x)

#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s, ## x); } while (0)

#define VERBOSE_SCFTORTOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG "!!! " s, ## x); } while (0)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter_cpus, 5, "Number of jiffies to change CPUs under test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU operations.");

char *torture_type = "";

#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");
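
// The parameters above are set on the module command line or, when the test
// is built in, via "scftorture."-prefixed kernel boot parameters.
// Illustrative invocation only (the values are made up), exercising mostly
// single-CPU calls with frequent statistics output:
//
//	modprobe scftorture nthreads=4 weight_single=3 weight_single_wait=3 \
//		stat_interval=15 verbose=1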

struct scf_statistics {
	struct task_struct *task;
	int cpu;
	long long n_single;
	long long n_single_ofl;
	long long n_single_wait;
	long long n_single_wait_ofl;
	long long n_many;
	long long n_many_wait;
	long long n_all;
	long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);

// Data for random primitive selection
#define SCF_PRIM_SINGLE		0
#define SCF_PRIM_MANY		1
#define SCF_PRIM_ALL		2
#define SCF_NPRIMS		(2 * 3) // Need wait and no-wait versions of each.

static char *scf_prim_name[] = {
	"smp_call_function_single",
	"smp_call_function_many",
	"smp_call_function",
};
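
// scf_prim_name[] is indexed by the SCF_PRIM_* values above; SCF_NPRIMS is
// twice that count because each primitive is exercised in both wait and
// no-wait form.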

struct scf_selector {
	unsigned long scfs_weight;
	int scfs_prim;
	bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
	bool scfc_in;
	bool scfc_out;
	int scfc_cpu; // -1 for not _single().
	bool scfc_wait;
};
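
// Lifecycle of a struct scf_check, as used below: the caller allocates it and
// sets scfc_in just before issuing the smp_call_function*() call, and the
// handler complains (n_mb_in_errs) if that store is not visible.  On the wait
// path the handler sets scfc_out and the caller both checks it (n_mb_out_errs)
// and frees the structure; on the no-wait path the handler frees it itself.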

// Use to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
	int cpu;
	int i;
	long long invoked_count = 0;
	bool isdone = READ_ONCE(scfdone);
	struct scf_statistics scfs = {};

	for_each_possible_cpu(cpu)
		invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
	for (i = 0; i < nthreads; i++) {
		scfs.n_single += scf_stats_p[i].n_single;
		scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
		scfs.n_single_wait += scf_stats_p[i].n_single_wait;
		scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
		scfs.n_many += scf_stats_p[i].n_many;
		scfs.n_many_wait += scf_stats_p[i].n_many_wait;
		scfs.n_all += scf_stats_p[i].n_all;
		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
	}
	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
	    atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
		bangstr = "!!! ";
	pr_alert("%s %sscf_invoked_count %s: %lld single: %lld/%lld single_ofl: %lld/%lld many: %lld/%lld all: %lld/%lld ",
		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count,
		 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
		 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
	torture_onoff_stats();
	pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
		atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
		atomic_read(&n_alloc_errs));
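	// In the pr_cont() above, ste/stnmie/stnmoe/staf report n_errs,
	// n_mb_in_errs, n_mb_out_errs and n_alloc_errs, respectively.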
}

// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
	struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

	// If no weight, if array would overflow, if computing three-place
	// percentages would overflow, or if the scf_prim_name[] array would
	// overflow, don't bother.  In the last three cases, complain.
	if (!weight ||
	    WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
	    WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
	    WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
		return;
	scf_sel_totweight += weight;
	scfsp->scfs_weight = scf_sel_totweight;
	scfsp->scfs_prim = prim;
	scfsp->scfs_wait = wait;
	scf_sel_array_len++;
}
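
// Each entry's scfs_weight holds the running total of the weights added so
// far, so the array describes ranges.  For example, adding weights 4, 2, and
// 1 stores scfs_weight values 4, 6, and 7; scf_sel_rand() below then draws a
// random value in [0, 7] and returns the first entry whose scfs_weight is at
// least that value, giving each primitive a share roughly proportional to
// its weight.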

// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
	int i;
	unsigned long oldw = 0;
	struct scf_selector *scfsp;
	unsigned long w;

	for (i = 0; i < scf_sel_array_len; i++) {
		scfsp = &scf_sel_array[i];
		w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
		pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
			scf_prim_name[scfsp->scfs_prim],
			scfsp->scfs_wait ? "wait" : "nowait");
		oldw = scfsp->scfs_weight;
	}
}

// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
	int i;
	unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

	for (i = 0; i < scf_sel_array_len; i++)
		if (scf_sel_array[i].scfs_weight >= w)
			return &scf_sel_array[i];
	WARN_ON_ONCE(1);
	return &scf_sel_array[0];
}

// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
	int i;
	int j;
	unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp)) {
		WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
		if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
			atomic_inc(&n_mb_in_errs);
	}
	this_cpu_inc(scf_invoked_count);
	if (longwait <= 0) {
		if (!(r & 0xffc0))
			udelay(r & 0x3f);
		goto out;
	}
	if (r & 0xfff)
		goto out;
	r = (r >> 12);
	if (longwait <= 0) {
		udelay((r & 0xff) + 1);
		goto out;
	}
	r = r % longwait + 1;
	for (i = 0; i < r; i++) {
		for (j = 0; j < 1000; j++) {
			udelay(1000);
			cpu_relax();
		}
	}
out:
	if (unlikely(!scfcp))
		return;
	if (scfcp->scfc_wait)
		WRITE_ONCE(scfcp->scfc_out, true);
	else
		kfree(scfcp);
}

// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu, "%s: Wanted CPU %d got CPU %d\n", __func__, scfcp->scfc_cpu, smp_processor_id())) {
		atomic_inc(&n_errs);
	}
	scf_handler(scfcp);
}

// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
	uintptr_t cpu;
	int ret = 0;
	struct scf_check *scfcp = NULL;
	struct scf_selector *scfsp = scf_sel_rand(trsp);

	if (use_cpus_read_lock)
		cpus_read_lock();
	else
		preempt_disable();
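	// Both cpus_read_lock() and disabling preemption keep CPU-hotplug
	// operations from completing across the call below; the
	// use_cpus_read_lock parameter chooses which mechanism is exercised.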
	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (WARN_ON_ONCE(!scfcp)) {
			atomic_inc(&n_alloc_errs);
		} else {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = scfsp->scfs_wait;
			scfcp->scfc_out = false;
		}
	}
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
		else
			scfp->n_single++;
		if (scfcp) {
			scfcp->scfc_cpu = cpu;
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
		if (ret) {
			if (scfsp->scfs_wait)
				scfp->n_single_wait_ofl++;
			else
				scfp->n_single_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
			scfp->n_many++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
		break;
	case SCF_PRIM_ALL:
		if (scfsp->scfs_wait)
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		break;
	default:
		WARN_ON_ONCE(1);
		if (scfcp)
			scfcp->scfc_out = true;
	}
	if (scfcp && scfsp->scfs_wait) {
		if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
				 !scfcp->scfc_out))
			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
		else
			kfree(scfcp);
		barrier(); // Prevent race-reduction compiler optimizations.
	}
	if (use_cpus_read_lock)
		cpus_read_unlock();
	else
		preempt_enable();
	if (!(torture_random(trsp) & 0xfff))
		schedule_timeout_uninterruptible(1);
}

// SCF test kthread.  Repeatedly does calls to members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
	int cpu;
	DEFINE_TORTURE_RANDOM(rand);
	struct scf_statistics *scfp = (struct scf_statistics *)arg;
	bool was_offline = false;

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
	cpu = scfp->cpu % nr_cpu_ids;
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id());

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(smp_processor_id() != scfp->cpu);

	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started)) {
			if (torture_must_stop()) {
				VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
				goto end;
			}
			schedule_timeout_uninterruptible(1);
		}

	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

	do {
		scftorture_invoke_one(scfp, &rand);
		while (cpu_is_offline(cpu) && !torture_must_stop()) {
			schedule_timeout_interruptible(HZ / 5);
			was_offline = true;
		}
		if (was_offline) {
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			was_offline = false;
		}
		cond_resched();
	} while (!torture_must_stop());

	VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
	torture_kthread_stopping("scftorture_invoker");
	return 0;
}

static void
scftorture_print_module_parms(const char *tag)
{
	pr_alert(SCFTORT_FLAG
		 "--- %s:  verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter_cpus=%d use_cpus_read_lock=%d, weight_single=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
		 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown, stat_interval, stutter_cpus, use_cpus_read_lock, weight_single, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}

static void scf_cleanup_handler(void *unused)
{
}

static void scf_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	WRITE_ONCE(scfdone, true);
	if (nthreads)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;
	smp_call_function(scf_cleanup_handler, NULL, 0);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print();  // -After- the stats thread is stopped!
	kfree(scf_stats_p);  // -After- the last stats print has completed!
	scf_stats_p = NULL;

	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}

static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;
	unsigned long weight_single1 = weight_single;
	unsigned long weight_single_wait1 = weight_single_wait;
	unsigned long weight_many1 = weight_many;
	unsigned long weight_many_wait1 = weight_many_wait;
	unsigned long weight_all1 = weight_all;
	unsigned long weight_all_wait1 = weight_all_wait;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	if (weight_single == -1 && weight_single_wait == -1 &&
	    weight_many == -1 && weight_many_wait == -1 &&
	    weight_all == -1 && weight_all_wait == -1) {
		weight_single1 = 2 * nr_cpu_ids;
		weight_single_wait1 = 2 * nr_cpu_ids;
		weight_many1 = 2;
		weight_many_wait1 = 2;
		weight_all1 = 1;
		weight_all_wait1 = 1;
	} else {
		if (weight_single == -1)
			weight_single1 = 0;
		if (weight_single_wait == -1)
			weight_single_wait1 = 0;
		if (weight_many == -1)
			weight_many1 = 0;
		if (weight_many_wait == -1)
			weight_many_wait1 = 0;
		if (weight_all == -1)
			weight_all1 = 0;
		if (weight_all_wait == -1)
			weight_all_wait1 = 0;
	}
	if (weight_single1 == 0 && weight_single_wait1 == 0 &&
	    weight_many1 == 0 && weight_many_wait1 == 0 &&
	    weight_all1 == 0 && weight_all_wait1 == 0) {
		VERBOSE_SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
		firsterr = -EINVAL;
		goto unwind;
	}
	scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
	scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
	scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
	scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
	scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
	scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
	scf_sel_dump();

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (firsterr)
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		VERBOSE_SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads\n", nthreads);

	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (firsterr)
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	scf_torture_cleanup();
	return firsterr;
}

module_init(scf_torture_init);
module_exit(scf_torture_cleanup);
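
For reference, below is a minimal sketch of the API family this module stress-tests, showing the wait/no-wait semantics exercised above. It is illustrative only and not part of scftorture; the example_* function names and messages are made up.

#include <linux/smp.h>
#include <linux/printk.h>

// Runs in IPI context on the target CPU, so it must not sleep; scftorture's
// scf_handler() operates under the same constraint.
static void example_ipi_handler(void *info)
{
	pr_info("example: handler on CPU %d\n", smp_processor_id());
}

static void example_call_one_cpu(int cpu)
{
	// Non-zero last argument: wait for the handler to finish, as in the
	// scfs_wait == true cases above.
	int ret = smp_call_function_single(cpu, example_ipi_handler, NULL, 1);

	if (ret)
		pr_warn("example: could not IPI CPU %d (err %d)\n", cpu, ret);
}

static void example_call_all_other_cpus(void)
{
	// Zero last argument: fire-and-forget, as in the no-wait cases above.
	smp_call_function(example_ipi_handler, NULL, 0);
}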