Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards.

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Module-based torture test facility for locking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) IBM Corporation, 2014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *          Davidlohr Bueso <dave@stgolabs.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *	Based on kernel/rcu/torture.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #define pr_fmt(fmt) fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/sched/rt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <uapi/linux/sched/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/rtmutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/percpu-rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/torture.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
/*
 * Module parameters: kthread counts plus the timing knobs for the
 * standard torture-test services (hotplug, shuffle, shutdown, stats,
 * stutter).  A count of -1 presumably selects a default elsewhere in
 * this file — TODO confirm against the (unseen) init code.
 */
torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
/*
 * Lock flavor to torture; presumably matched against the .name field of
 * one of the lock_torture_ops vectors below (selection code not in this
 * chunk).  Read-only once the module is loaded (mode 0444).
 */
static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 
/* Statistics kthread plus the arrays of writer/reader stress kthreads. */
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

/*
 * Set while a write (resp. read) acquisition is believed held.
 * NOTE(review): the code that sets/checks these is outside this chunk —
 * presumably used to detect mutual-exclusion failures; confirm there.
 */
static bool lock_is_write_held;
static bool lock_is_read_held;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
/* Per-kthread lock-acquisition counters. */
struct lock_stress_stats {
	long n_lock_fail;	/* failed acquisition attempts */
	long n_lock_acquired;	/* successful acquisitions */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) /* Forward reference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) static void lock_torture_cleanup(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73)  * Operations vector for selecting different types of tests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74)  */
/*
 * Operations vector for selecting different types of tests.
 * Read-side hooks may be NULL for exclusive-only lock types.
 */
struct lock_torture_ops {
	void (*init)(void);		/* optional one-time setup */
	int (*writelock)(void);		/* acquire for writing, 0 on success */
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);		/* acquire for reading, 0 on success */
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks: saved irq state between
			      * writelock and writeunlock */
	const char *name;    /* matched against the torture_type parameter */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) struct lock_torture_cxt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	int nrealwriters_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	int nrealreaders_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	bool debug_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	atomic_t n_lock_torture_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	struct lock_torture_ops *cur_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	struct lock_stress_stats *lwsa; /* writer statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	struct lock_stress_stats *lrsa; /* reader statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) static struct lock_torture_cxt cxt = { 0, 0, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 				       ATOMIC_INIT(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 				       NULL, NULL};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102)  * Definitions for lock torture testing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
/*
 * Deliberately-broken "acquisition": a no-op, so no exclusion is ever
 * provided.  Exists so the harness's failure detection can be exercised.
 */
static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	/* We want a long delay occasionally to force massive contention.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 		mdelay(longdelay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 
/* Deliberately-broken "release": also a no-op. */
static void torture_lock_busted_write_unlock(void)
{
	  /* BUGGY, do not use in real life!!! */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 
/* No-op task_boost hook for lock types that ignore task priority. */
static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
/* Ops vector for the deliberately-broken lock type (no read side). */
static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
/* The spinlock shared by all spin_lock / spin_lock_irq test kthreads. */
static DEFINE_SPINLOCK(torture_spinlock);

/* Acquire the test spinlock (non-irq variant); always succeeds. */
static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	const unsigned long shortdelay_us = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	/* We want a short delay mostly to emulate likely code, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	 * we want a long delay occasionally to force massive contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		mdelay(longdelay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		udelay(shortdelay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
/* Release the test spinlock (non-irq variant). */
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
/* Ops vector for plain spin_lock (write side only). */
static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
/*
 * Acquire the test spinlock with interrupts disabled.  The saved irq
 * flags are stashed in the single cxt.cur_ops->flags slot for the
 * matching unlock; sharing one slot is valid only because the lock
 * itself guarantees at most one writer holds it at a time.
 */
static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 
/* Release the irq-disabling spinlock, restoring the stashed irq flags. */
static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
/* Ops vector for spin_lock_irqsave/restore (write side only). */
static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
/* The reader-writer lock shared by all rw_lock / rw_lock_irq kthreads. */
static DEFINE_RWLOCK(torture_rwlock);

/* Acquire the rwlock for writing; always succeeds. */
static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) static void torture_rwlock_write_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	const unsigned long shortdelay_us = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	/* We want a short delay mostly to emulate likely code, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	 * we want a long delay occasionally to force massive contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		mdelay(longdelay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		udelay(shortdelay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
/* Release the rwlock's write side. */
static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
/* Acquire the rwlock for reading; always succeeds. */
static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) static void torture_rwlock_read_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	const unsigned long shortdelay_us = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	/* We want a short delay mostly to emulate likely code, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	 * we want a long delay occasionally to force massive contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		mdelay(longdelay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		udelay(shortdelay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
/* Release the rwlock's read side. */
static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
/* Ops vector for plain rwlock, with both write and read sides. */
static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
/*
 * Acquire the rwlock for writing with interrupts disabled, stashing the
 * saved irq flags in cxt.cur_ops->flags for the matching unlock (the
 * single slot is valid because only one writer holds the lock at once).
 */
static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
/* Release the write side, restoring the stashed irq flags. */
static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
/*
 * Acquire the rwlock for reading with interrupts disabled, stashing the
 * saved irq flags in cxt.cur_ops->flags for the matching unlock.
 * NOTE(review): multiple readers can hold the lock concurrently, so
 * concurrent readers overwrite each other's flags here — presumably
 * benign because all stashed flag values are equivalent; confirm.
 */
static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
/* Release the read side, restoring the stashed irq flags. */
static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
/* Ops vector for irq-disabling rwlock, with write and read sides. */
static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
/* The sleeping mutex shared by all mutex_lock test kthreads. */
static DEFINE_MUTEX(torture_mutex);

/* Acquire the test mutex (uninterruptible); always succeeds. */
static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) static void torture_mutex_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	/* We want a long delay occasionally to force massive contention.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		mdelay(longdelay_ms * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		mdelay(longdelay_ms / 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 
/* Release the test mutex. */
static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
/* Ops vector for mutex_lock (write side only). */
static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
/*
 * Wound/wait mutex test state: three mutexes in one wound/die class, so
 * torture_ww_mutex_lock() can exercise the -EDEADLK backoff protocol.
 */
#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 
/*
 * Acquire all three ww_mutexes under one acquire context, exercising the
 * wound/wait deadlock-avoidance protocol: on -EDEADLK, drop everything
 * acquired so far, take the contended lock with ww_mutex_lock_slow(),
 * move it to the head of the acquisition order, and retry the rest.
 */
static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	/* Per-lock list node so the acquisition order can be reshuffled. */
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	/* list_add() prepends, so the initial order is 2, 1, 0. */
	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		/* Back out every lock acquired before the failing one. */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		/*
		 * NOTE(review): this path returns without ww_acquire_fini(&ctx);
		 * presumably unreachable because ww_mutex_lock() fails only
		 * with -EDEADLK — confirm against the ww_mutex API docs.
		 */
		if (err != -EDEADLK)
			return err;

		/* Wait for the contended lock, then retry in the new order. */
		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
/* Release all three ww_mutexes (order does not matter for unlock). */
static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
/* Ops vector for wound/wait mutexes (write side only). */
static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) #ifdef CONFIG_RT_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) static DEFINE_RT_MUTEX(torture_rtmutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	rt_mutex_lock(&torture_rtmutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) static void torture_rtmutex_boost(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	const unsigned int factor = 50000; /* yes, quite arbitrary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	if (!rt_task(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		 * Boost priority once every ~50k operations. When the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		 * task tries to take the lock, the rtmutex it will account
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		 * for the new priority, and do any corresponding pi-dance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		if (trsp && !(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 			      (cxt.nrealwriters_stress * factor))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			sched_set_fifo(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		} else /* common case, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		 * The task will remain boosted for another ~500k operations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		 * then restored back to its original prio, and so forth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 		 * When @trsp is nil, we want to force-reset the task for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 		 * stopping the kthread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		if (!trsp || !(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			       (cxt.nrealwriters_stress * factor * 2))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			sched_set_normal(current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		} else /* common case, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) static void torture_rtmutex_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	const unsigned long shortdelay_us = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	 * We want a short delay mostly to emulate likely code, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	 * we want a long delay occasionally to force massive contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		mdelay(longdelay_ms);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		udelay(shortdelay_us);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	rt_mutex_unlock(&torture_rtmutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) static struct lock_torture_ops rtmutex_lock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	.writelock	= torture_rtmutex_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	.write_delay	= torture_rtmutex_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	.task_boost     = torture_rtmutex_boost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	.writeunlock	= torture_rtmutex_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	.readlock       = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	.read_delay     = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	.readunlock     = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	.name		= "rtmutex_lock"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) static DECLARE_RWSEM(torture_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	down_write(&torture_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) static void torture_rwsem_write_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	/* We want a long delay occasionally to force massive contention.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		mdelay(longdelay_ms * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		mdelay(longdelay_ms / 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) static void torture_rwsem_up_write(void) __releases(torture_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	up_write(&torture_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	down_read(&torture_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static void torture_rwsem_read_delay(struct torture_random_state *trsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	const unsigned long longdelay_ms = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	/* We want a long delay occasionally to force massive contention.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	if (!(torture_random(trsp) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		mdelay(longdelay_ms * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		mdelay(longdelay_ms / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		torture_preempt_schedule();  /* Allow test to be preempted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) static void torture_rwsem_up_read(void) __releases(torture_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	up_read(&torture_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) static struct lock_torture_ops rwsem_lock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	.writelock	= torture_rwsem_down_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	.write_delay	= torture_rwsem_write_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	.task_boost     = torture_boost_dummy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	.writeunlock	= torture_rwsem_up_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	.readlock       = torture_rwsem_down_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	.read_delay     = torture_rwsem_read_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	.readunlock     = torture_rwsem_up_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	.name		= "rwsem_lock"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) #include <linux/percpu-rwsem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) static struct percpu_rw_semaphore pcpu_rwsem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static void torture_percpu_rwsem_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	percpu_down_write(&pcpu_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	percpu_up_write(&pcpu_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	percpu_down_read(&pcpu_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	percpu_up_read(&pcpu_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) static struct lock_torture_ops percpu_rwsem_lock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	.init		= torture_percpu_rwsem_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	.writelock	= torture_percpu_rwsem_down_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	.write_delay	= torture_rwsem_write_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	.task_boost     = torture_boost_dummy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	.writeunlock	= torture_percpu_rwsem_up_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	.readlock       = torture_percpu_rwsem_down_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	.read_delay     = torture_rwsem_read_delay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	.readunlock     = torture_percpu_rwsem_up_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	.name		= "percpu_rwsem_lock"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609)  * Lock torture writer kthread.  Repeatedly acquires and releases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  * the lock, checking for duplicate acquisitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) static int lock_torture_writer(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	struct lock_stress_stats *lwsp = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	DEFINE_TORTURE_RANDOM(rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		if ((torture_random(&rand) & 0xfffff) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		cxt.cur_ops->task_boost(&rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		cxt.cur_ops->writelock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		if (WARN_ON_ONCE(lock_is_write_held))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			lwsp->n_lock_fail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		lock_is_write_held = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		if (WARN_ON_ONCE(lock_is_read_held))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			lwsp->n_lock_fail++; /* rare, but... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		lwsp->n_lock_acquired++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		cxt.cur_ops->write_delay(&rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		lock_is_write_held = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		cxt.cur_ops->writeunlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		stutter_wait("lock_torture_writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	cxt.cur_ops->task_boost(NULL); /* reset prio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	torture_kthread_stopping("lock_torture_writer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  * Lock torture reader kthread.  Repeatedly acquires and releases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  * the reader lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) static int lock_torture_reader(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	struct lock_stress_stats *lrsp = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	DEFINE_TORTURE_RANDOM(rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	set_user_nice(current, MAX_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		if ((torture_random(&rand) & 0xfffff) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 			schedule_timeout_uninterruptible(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		cxt.cur_ops->readlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		lock_is_read_held = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		if (WARN_ON_ONCE(lock_is_write_held))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 			lrsp->n_lock_fail++; /* rare, but... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		lrsp->n_lock_acquired++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		cxt.cur_ops->read_delay(&rand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		lock_is_read_held = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		cxt.cur_ops->readunlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		stutter_wait("lock_torture_reader");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	torture_kthread_stopping("lock_torture_reader");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  * Create an lock-torture-statistics message in the specified buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) static void __torture_print_stats(char *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 				  struct lock_stress_stats *statp, bool write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	bool fail = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	int i, n_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	long long sum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	for (i = 0; i < n_stress; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		if (statp[i].n_lock_fail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			fail = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		sum += statp[i].n_lock_acquired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		if (max < statp[i].n_lock_acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			max = statp[i].n_lock_acquired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		if (min > statp[i].n_lock_acquired)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 			min = statp[i].n_lock_acquired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	page += sprintf(page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 			write ? "Writes" : "Reads ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 			sum, max, min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 			!onoff_interval && max / 2 > min ? "???" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			fail, fail ? "!!!" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	if (fail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 		atomic_inc(&cxt.n_lock_torture_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)  * Print torture statistics.  Caller must ensure that there is only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)  * call to this function at a given time!!!  This is normally accomplished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  * by relying on the module system to only have one copy of the module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712)  * loaded, and then by giving the lock_torture_stats kthread full control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713)  * (or the init/cleanup functions when lock_torture_stats thread is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714)  * running).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) static void lock_torture_stats_print(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	int size = cxt.nrealwriters_stress * 200 + 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	if (cxt.cur_ops->readlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		size += cxt.nrealreaders_stress * 200 + 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	buf = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		pr_err("lock_torture_stats_print: Out of memory, need: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		       size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	__torture_print_stats(buf, cxt.lwsa, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	pr_alert("%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	if (cxt.cur_ops->readlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		buf = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			pr_err("lock_torture_stats_print: Out of memory, need: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			       size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		__torture_print_stats(buf, cxt.lrsa, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		pr_alert("%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * Periodically prints torture statistics, if periodic statistics printing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * was specified via the stat_interval module parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  * No need to worry about fullstop here, since this one doesn't reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  * volatile state or register callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) static int lock_torture_stats(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		schedule_timeout_interruptible(stat_interval * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		lock_torture_stats_print();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		torture_shutdown_absorb("lock_torture_stats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	} while (!torture_must_stop());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	torture_kthread_stopping("lock_torture_stats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				const char *tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	pr_alert("%s" TORTURE_FLAG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		 verbose, shuffle_interval, stutter, shutdown_secs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		 onoff_interval, onoff_holdoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) static void lock_torture_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (torture_cleanup_begin())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	 * Indicates early cleanup, meaning that the test has not run,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	 * such as when passing bogus args when loading the module. As
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	 * such, only perform the underlying torture-specific cleanups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	 * and avoid anything related to locktorture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (!cxt.lwsa && !cxt.lrsa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (writer_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		for (i = 0; i < cxt.nrealwriters_stress; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			torture_stop_kthread(lock_torture_writer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 					     writer_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		kfree(writer_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		writer_tasks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (reader_tasks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		for (i = 0; i < cxt.nrealreaders_stress; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			torture_stop_kthread(lock_torture_reader,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 					     reader_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		kfree(reader_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		reader_tasks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	torture_stop_kthread(lock_torture_stats, stats_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	lock_torture_stats_print();  /* -After- the stats thread is stopped! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (atomic_read(&cxt.n_lock_torture_errors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		lock_torture_print_module_parms(cxt.cur_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 						"End of test: FAILURE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	else if (torture_onoff_failures())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		lock_torture_print_module_parms(cxt.cur_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 						"End of test: LOCK_HOTPLUG");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		lock_torture_print_module_parms(cxt.cur_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 						"End of test: SUCCESS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	kfree(cxt.lwsa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	cxt.lwsa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	kfree(cxt.lrsa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	cxt.lrsa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	torture_cleanup_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static int __init lock_torture_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int firsterr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	static struct lock_torture_ops *torture_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		&lock_busted_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		&spin_lock_ops, &spin_lock_irq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		&rw_lock_ops, &rw_lock_irq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		&mutex_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		&ww_mutex_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) #ifdef CONFIG_RT_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		&rtmutex_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		&rwsem_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		&percpu_rwsem_lock_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (!torture_init_begin(torture_type, verbose))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	/* Process args and tell the world that the torturer is on the job. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		cxt.cur_ops = torture_ops[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (i == ARRAY_SIZE(torture_ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			 torture_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		pr_alert("lock-torture types:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			pr_alert(" %s", torture_ops[i]->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		pr_alert("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		firsterr = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (nwriters_stress == 0 && nreaders_stress == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		pr_alert("lock-torture: must run at least one locking thread\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		firsterr = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (cxt.cur_ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		cxt.cur_ops->init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (nwriters_stress >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		cxt.nrealwriters_stress = nwriters_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		cxt.nrealwriters_stress = 2 * num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (str_has_prefix(torture_type, "mutex"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		cxt.debug_lock = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) #ifdef CONFIG_DEBUG_RT_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (str_has_prefix(torture_type, "rtmutex"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		cxt.debug_lock = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) #ifdef CONFIG_DEBUG_SPINLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if ((str_has_prefix(torture_type, "spin")) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	    (str_has_prefix(torture_type, "rw_lock")))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		cxt.debug_lock = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/* Initialize the statistics so that each run gets its own numbers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	if (nwriters_stress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		lock_is_write_held = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 					 sizeof(*cxt.lwsa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		if (cxt.lwsa == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		for (i = 0; i < cxt.nrealwriters_stress; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			cxt.lwsa[i].n_lock_fail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			cxt.lwsa[i].n_lock_acquired = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (cxt.cur_ops->readlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		if (nreaders_stress >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			cxt.nrealreaders_stress = nreaders_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			 * By default distribute evenly the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			 * readers and writers. We still run the same number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			 * of threads as the writer-only locks default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			if (nwriters_stress < 0) /* user doesn't care */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				cxt.nrealwriters_stress = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		if (nreaders_stress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			lock_is_read_held = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 						 sizeof(*cxt.lrsa),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 						 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			if (cxt.lrsa == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				kfree(cxt.lwsa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				cxt.lwsa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 				goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			for (i = 0; i < cxt.nrealreaders_stress; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				cxt.lrsa[i].n_lock_fail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				cxt.lrsa[i].n_lock_acquired = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/* Prepare torture context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if (onoff_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		firsterr = torture_onoff_init(onoff_holdoff * HZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 					      onoff_interval * HZ, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (shuffle_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		firsterr = torture_shuffle_init(shuffle_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (shutdown_secs > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		firsterr = torture_shutdown_init(shutdown_secs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 						 lock_torture_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (stutter > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		firsterr = torture_stutter_init(stutter, stutter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (nwriters_stress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		writer_tasks = kcalloc(cxt.nrealwriters_stress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				       sizeof(writer_tasks[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		if (writer_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (cxt.cur_ops->readlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		reader_tasks = kcalloc(cxt.nrealreaders_stress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 				       sizeof(reader_tasks[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		if (reader_tasks == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			kfree(writer_tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			writer_tasks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			firsterr = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * Create the kthreads and start torturing (oh, those poor little locks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * TODO: Note that we interleave writers with readers, giving writers a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * slight advantage, by creating its kthread first. This can be modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * for very specific needs, or even let the user choose the policy, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 * ever wanted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		    j < cxt.nrealreaders_stress; i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		if (i >= cxt.nrealwriters_stress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			goto create_reader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		/* Create writer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 						  writer_tasks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	create_reader:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		/* Create reader. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 						  reader_tasks[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (stat_interval > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		firsterr = torture_create_kthread(lock_torture_stats, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 						  stats_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		if (firsterr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	torture_init_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	torture_init_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	lock_torture_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return firsterr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) module_init(lock_torture_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) module_exit(lock_torture_cleanup);