/*
 * kmod stress test driver
 *
 * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or at your option any
 * later version; or, when distributed separately from the Linux kernel or
 * when incorporated into other software packages, subject to the following
 * license:
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of copyleft-next (version 0.3.1 or later) as published
 * at http://copyleft-next.org/.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * This driver provides an interface to trigger and test the kernel's
 * module loader through a series of configurations and a few triggers.
 * To test this driver use the following script as root:
 *
 *  tools/testing/selftests/kmod/kmod.sh --help
 */
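
/*
 * A rough, illustrative manual session. The sysfs paths below are an
 * assumption: the first test device typically registers as test_kmod0
 * under /sys/devices/virtual/misc/, and the selftest script resolves the
 * real location for you.
 *
 *   DIR=/sys/devices/virtual/misc/test_kmod0
 *   cat "$DIR"/config                      # dump the current configuration
 *   echo -n "xfs" > "$DIR"/config_test_fs  # filesystem used by get_fs_type()
 *   echo -n "1" > "$DIR"/trigger_config    # fire the configured test case
 */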

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/printk.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/device.h>

#define TEST_START_NUM_THREADS	50
#define TEST_START_DRIVER	"test_module"
#define TEST_START_TEST_FS	"xfs"
#define TEST_START_TEST_CASE	TEST_KMOD_DRIVER


static bool force_init_test = false;
module_param(force_init_test, bool_enable_only, 0644);
MODULE_PARM_DESC(force_init_test,
		 "Force kicking a test immediately after driver loads");

/*
 * For device allocation / registration
 */
static DEFINE_MUTEX(reg_dev_mutex);
static LIST_HEAD(reg_test_devs);

/*
 * num_test_devs is the ID that will be assigned to the *next*
 * device we allow to be created.
 */
static int num_test_devs;

/**
 * enum kmod_test_case - kmod test case
 *
 * If you add a test case, please be sure to review whether you need to
 * set @need_mod_put for your test case.
 *
 * @TEST_KMOD_DRIVER: stress tests request_module()
 * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
 */
enum kmod_test_case {
	__TEST_KMOD_INVALID = 0,

	TEST_KMOD_DRIVER,
	TEST_KMOD_FS_TYPE,

	__TEST_KMOD_MAX,
};

struct test_config {
	char *test_driver;
	char *test_fs;
	unsigned int num_threads;
	enum kmod_test_case test_case;
	int test_result;
};

struct kmod_test_device;

/**
 * struct kmod_test_device_info - per-thread info
 *
 * @ret_sync: return value of the synchronous request_module() call for
 *	@TEST_KMOD_DRIVER
 * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
 * @task_sync: the kthread running this request, NULL once it has finished
 * @thread_idx: thread ID
 * @test_dev: test device the test is being performed under
 * @need_mod_put: some tests (get_fs_type() is one) require putting the module
 *	(module_put(fs_sync->owner)) when done, otherwise you will not be able
 *	to unload the respective modules and re-test. We use this to keep
 *	accounting of when we need this and to help out in case we need to
 *	error out and deal with module_put() on error.
 */
struct kmod_test_device_info {
	int ret_sync;
	struct file_system_type *fs_sync;
	struct task_struct *task_sync;
	unsigned int thread_idx;
	struct kmod_test_device *test_dev;
	bool need_mod_put;
};

/**
 * struct kmod_test_device - test device to help test kmod
 *
 * @dev_idx: unique ID for test device
 * @config: configuration for the test
 * @misc_dev: we use a misc device under the hood
 * @dev: pointer to misc_dev's own struct device
 * @config_mutex: protects configuration of test
 * @trigger_mutex: the test trigger can only be fired once at a time
 * @thread_mutex: protects @done count, and the @info for each thread
 * @done: number of threads which have completed or failed
 * @test_is_oom: when we run out of memory, use this to halt moving forward
 * @kthreads_done: completion used to signal when all work is done
 * @list: needed to be part of reg_test_devs
 * @info: array of info for each thread
 */
struct kmod_test_device {
	int dev_idx;
	struct test_config config;
	struct miscdevice misc_dev;
	struct device *dev;
	struct mutex config_mutex;
	struct mutex trigger_mutex;
	struct mutex thread_mutex;

	unsigned int done;

	bool test_is_oom;
	struct completion kthreads_done;
	struct list_head list;

	struct kmod_test_device_info *info;
};
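
/*
 * Lock ordering for the trigger path below: trigger_mutex is taken first,
 * then config_mutex, and thread_mutex nests innermost (taken per thread in
 * try_one_request() and again when tallying up results).
 */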

static const char *test_case_str(enum kmod_test_case test_case)
{
	switch (test_case) {
	case TEST_KMOD_DRIVER:
		return "TEST_KMOD_DRIVER";
	case TEST_KMOD_FS_TYPE:
		return "TEST_KMOD_FS_TYPE";
	default:
		return "invalid";
	}
}

static struct miscdevice *dev_to_misc_dev(struct device *dev)
{
	return dev_get_drvdata(dev);
}

static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
{
	return container_of(misc_dev, struct kmod_test_device, misc_dev);
}

static struct kmod_test_device *dev_to_test_dev(struct device *dev)
{
	struct miscdevice *misc_dev;

	misc_dev = dev_to_misc_dev(dev);

	return misc_dev_to_test_dev(misc_dev);
}

/* Must run with thread_mutex held */
static void kmod_test_done_check(struct kmod_test_device *test_dev,
				 unsigned int idx)
{
	struct test_config *config = &test_dev->config;

	test_dev->done++;
	dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);

	if (test_dev->done == config->num_threads) {
		dev_info(test_dev->dev, "Done: %u threads have all run now\n",
			 test_dev->done);
		dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
		complete(&test_dev->kthreads_done);
	}
}

static void test_kmod_put_module(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	if (!info->need_mod_put)
		return;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		break;
	case TEST_KMOD_FS_TYPE:
		if (info->fs_sync && info->fs_sync->owner)
			module_put(info->fs_sync->owner);
		break;
	default:
		BUG();
	}

	info->need_mod_put = false;
}

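/*
 * Per-thread worker: issue exactly one request for the configured test case,
 * record the result in this thread's kmod_test_device_info slot, and let
 * kmod_test_done_check() account for the completion.
 */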
static int run_request(void *data)
{
	struct kmod_test_device_info *info = data;
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		info->ret_sync = request_module("%s", config->test_driver);
		break;
	case TEST_KMOD_FS_TYPE:
		info->fs_sync = get_fs_type(config->test_fs);
		info->need_mod_put = true;
		break;
	default:
		/* __trigger_config_run() already checked for test sanity */
		BUG();
		return -EINVAL;
	}

	dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);

	test_kmod_put_module(info);

	mutex_lock(&test_dev->thread_mutex);
	info->task_sync = NULL;
	kmod_test_done_check(test_dev, info->thread_idx);
	mutex_unlock(&test_dev->thread_mutex);

	return 0;
}

static int tally_work_test(struct kmod_test_device_info *info)
{
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;
	int err_ret = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		/*
		 * Only capture errors, if one is found that's
		 * enough, for now.
		 */
		if (info->ret_sync != 0)
			err_ret = info->ret_sync;
		dev_info(test_dev->dev,
			 "Sync thread %d return status: %d\n",
			 info->thread_idx, info->ret_sync);
		break;
	case TEST_KMOD_FS_TYPE:
		/* For now we make this simple */
		if (!info->fs_sync)
			err_ret = -EINVAL;
		dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
			 info->thread_idx, info->fs_sync ? config->test_fs :
			 "NULL");
		break;
	default:
		BUG();
	}

	return err_ret;
}

/*
 * XXX: add a result option to report whether all errors matched.
 * For now we just keep whatever error code was found, if any.
 *
 * If this ran it means *all* tasks were created fine and we
 * are now just collecting results.
 *
 * Only propagate errors, do not override them with a subsequent success case.
 */
static void tally_up_work(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int idx;
	int err_ret = 0;
	int ret = 0;

	mutex_lock(&test_dev->thread_mutex);

	dev_info(test_dev->dev, "Results:\n");

	for (idx = 0; idx < config->num_threads; idx++) {
		info = &test_dev->info[idx];
		ret = tally_work_test(info);
		if (ret)
			err_ret = ret;
	}

	/*
	 * Note: request_module() returns 256 for a module not found even
	 * though modprobe itself returns 1.
	 */
	config->test_result = err_ret;

	mutex_unlock(&test_dev->thread_mutex);
}

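/*
 * Kick off a single worker kthread for slot @idx. Any failure to create the
 * kthread is treated as an out-of-memory condition for the whole run.
 */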
static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
{
	struct kmod_test_device_info *info = &test_dev->info[idx];
	int fail_ret = -ENOMEM;

	mutex_lock(&test_dev->thread_mutex);

	info->thread_idx = idx;
	info->test_dev = test_dev;
	info->task_sync = kthread_run(run_request, info, "%s-%u",
				      KBUILD_MODNAME, idx);

	if (!info->task_sync || IS_ERR(info->task_sync)) {
		test_dev->test_is_oom = true;
		dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
		info->task_sync = NULL;
		goto err_out;
	} else
		dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);

	mutex_unlock(&test_dev->thread_mutex);

	return 0;

err_out:
	info->ret_sync = fail_ret;
	mutex_unlock(&test_dev->thread_mutex);

	return fail_ret;
}

static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int i;

	dev_info(test_dev->dev, "Ending request_module() tests\n");

	mutex_lock(&test_dev->thread_mutex);

	for (i = 0; i < config->num_threads; i++) {
		info = &test_dev->info[i];
		if (info->task_sync && !IS_ERR(info->task_sync)) {
			dev_info(test_dev->dev,
				 "Stopping still-running thread %i\n", i);
			kthread_stop(info->task_sync);
		}

		/*
		 * info->task_sync is well protected, it can only be
		 * NULL or a pointer to a struct. If it's NULL we either
		 * never ran, or we did and completed the work. Completed
		 * tasks *always* put the module for us. This is a sanity
		 * check -- just in case.
		 */
		if (info->task_sync && info->need_mod_put)
			test_kmod_put_module(info);
	}

	mutex_unlock(&test_dev->thread_mutex);
}

/*
 * Only wait *iff* we did not run into any errors during all of our thread
 * set up. If we ran into any issues we stop the threads and just bail out
 * with an error to the trigger. This also means we don't need any tally
 * work for threads which failed.
 */
static int try_requests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	unsigned int idx;
	int ret;
	bool any_error = false;

	for (idx = 0; idx < config->num_threads; idx++) {
		if (test_dev->test_is_oom) {
			any_error = true;
			break;
		}

		ret = try_one_request(test_dev, idx);
		if (ret) {
			any_error = true;
			break;
		}
	}

	if (!any_error) {
		test_dev->test_is_oom = false;
		dev_info(test_dev->dev,
			 "No errors were found while initializing threads\n");
		wait_for_completion(&test_dev->kthreads_done);
		tally_up_work(test_dev);
	} else {
		test_dev->test_is_oom = true;
		dev_info(test_dev->dev,
			 "At least one thread failed to start, stop all work\n");
		test_dev_kmod_stop_tests(test_dev);
		return -ENOMEM;
	}

	return 0;
}

static int run_test_driver(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test driver to load: %s\n",
		 config->test_driver);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static int run_test_fs_type(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	dev_info(test_dev->dev, "Test case: %s (%u)\n",
		 test_case_str(config->test_case),
		 config->test_case);
	dev_info(test_dev->dev, "Test filesystem to load: %s\n",
		 config->test_fs);
	dev_info(test_dev->dev, "Number of threads to run: %u\n",
		 config->num_threads);
	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
		 config->num_threads - 1);

	return try_requests(test_dev);
}

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int len = 0;

	mutex_lock(&test_dev->config_mutex);

	len += snprintf(buf, PAGE_SIZE,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Number of threads:\t%u\n",
			config->num_threads);

	len += snprintf(buf+len, PAGE_SIZE - len,
			"Test_case:\t%s (%u)\n",
			test_case_str(config->test_case),
			config->test_case);

	if (config->test_driver)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\t%s\n",
				config->test_driver);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"driver:\tEMPTY\n");

	if (config->test_fs)
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\t%s\n",
				config->test_fs);
	else
		len += snprintf(buf+len, PAGE_SIZE - len,
				"fs:\tEMPTY\n");

	mutex_unlock(&test_dev->config_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);
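
/*
 * Reading the config attribute dumps the current configuration in the
 * format built above, roughly (illustrative output only; the device name
 * and values depend on your setup and defaults):
 *
 *   Custom trigger configuration for: test_kmod0
 *   Number of threads:	50
 *   Test_case:	TEST_KMOD_DRIVER (1)
 *   driver:	test_module
 *   fs:	xfs
 */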

/*
 * This ensures we don't allow kicking threads through if our configuration
 * is faulty.
 */
static int __trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	test_dev->done = 0;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		return run_test_driver(test_dev);
	case TEST_KMOD_FS_TYPE:
		return run_test_fs_type(test_dev);
	default:
		dev_warn(test_dev->dev,
			 "Invalid test case requested: %u\n",
			 config->test_case);
		return -EINVAL;
	}
}

static int trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __trigger_config_run(test_dev);
	if (ret < 0)
		goto out;
	dev_info(test_dev->dev, "General test result: %d\n",
		 config->test_result);

	/*
	 * We must return 0 after a trigger event unless something went
	 * wrong with the setup of the test. If the test setup went fine
	 * then userspace must just check the result in config->test_result.
	 * One issue with relying on the return value of a call in the kernel
	 * is that a positive value returned through this trigger would not
	 * make it back to userspace, it would be lost.
	 *
	 * By not relying on capturing the return value of the tests we run
	 * through the trigger, it also allows us to run tests with set -e
	 * and only fail when something went wrong with the driver upon
	 * trigger requests.
	 */
	ret = 0;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}

static ssize_t
trigger_config_store(struct device *dev,
		     struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	if (test_dev->test_is_oom)
		return -ENOMEM;

	/*
	 * For all intents and purposes we don't care what userspace
	 * sent with this trigger, we care only that we were triggered.
	 * We treat the return value only for capturing issues with
	 * the test setup. At this point all the test variables should
	 * have been allocated so typically this should never fail.
	 */
	ret = trigger_config_run(test_dev);
	if (unlikely(ret < 0))
		goto out;

	/*
	 * Note: any return > 0 will be treated as success
	 * and the error value will not be available to userspace.
	 * Do not rely on trying to send a test's return value to
	 * userspace as positive return errors will be lost.
	 */
	if (WARN_ON(ret > 0))
		return -EINVAL;

	ret = count;
out:
	return ret;
}
static DEVICE_ATTR_WO(trigger_config);
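
/*
 * Writing anything to the trigger_config attribute fires the currently
 * configured test case; the written value itself is ignored. For example
 * (the path is an assumption, see the note at the top of this file):
 *
 *   echo -n "1" > /sys/devices/virtual/misc/test_kmod0/trigger_config
 *
 * The per-run result is recorded in config->test_result rather than in the
 * write's return value.
 */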

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int config_copy_test_driver_name(struct test_config *config,
					 const char *name,
					 size_t count)
{
	return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
}


static int config_copy_test_fs(struct test_config *config, const char *name,
			       size_t count)
{
	return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
}

static void __kmod_config_free(struct test_config *config)
{
	if (!config)
		return;

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	kfree_const(config->test_fs);
	config->test_fs = NULL;
}

static void kmod_config_free(struct kmod_test_device *test_dev)
{
	struct test_config *config;

	if (!test_dev)
		return;

	config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);
	__kmod_config_free(config);
	mutex_unlock(&test_dev->config_mutex);
}

static ssize_t config_test_driver_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_driver);
	config->test_driver = NULL;

	copied = config_copy_test_driver_name(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(struct mutex *config_mutex,
				    char *dst,
				    char *src)
{
	int len;

	mutex_lock(config_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(config_mutex);

	return len;
}

static ssize_t config_test_driver_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_driver);
}
static DEVICE_ATTR_RW(config_test_driver);

static ssize_t config_test_fs_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;
	int copied;

	mutex_lock(&test_dev->config_mutex);

	kfree_const(config->test_fs);
	config->test_fs = NULL;

	copied = config_copy_test_fs(config, buf, count);
	mutex_unlock(&test_dev->config_mutex);

	return copied;
}

static ssize_t config_test_fs_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	struct test_config *config = &test_dev->config;

	return config_test_show_str(&test_dev->config_mutex, buf,
				    config->test_fs);
}
static DEVICE_ATTR_RW(config_test_fs);

static int trigger_config_run_type(struct kmod_test_device *test_dev,
				   enum kmod_test_case test_case,
				   const char *test_str)
{
	int copied = 0;
	struct test_config *config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);

	switch (test_case) {
	case TEST_KMOD_DRIVER:
		kfree_const(config->test_driver);
		config->test_driver = NULL;
		copied = config_copy_test_driver_name(config, test_str,
						      strlen(test_str));
		break;
	case TEST_KMOD_FS_TYPE:
		kfree_const(config->test_fs);
		config->test_fs = NULL;
		copied = config_copy_test_fs(config, test_str,
					     strlen(test_str));
		break;
	default:
		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	config->test_case = test_case;

	mutex_unlock(&test_dev->config_mutex);

	if (copied <= 0 || copied != strlen(test_str)) {
		test_dev->test_is_oom = true;
		return -ENOMEM;
	}

	test_dev->test_is_oom = false;

	return trigger_config_run(test_dev);
}

static void free_test_dev_info(struct kmod_test_device *test_dev)
{
	vfree(test_dev->info);
	test_dev->info = NULL;
}

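/*
 * (Re)allocate the per-thread info array so that it matches the currently
 * configured number of threads.
 */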
static int kmod_config_sync_info(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;

	free_test_dev_info(test_dev);
	test_dev->info =
		vzalloc(array_size(sizeof(struct kmod_test_device_info),
				   config->num_threads));
	if (!test_dev->info)
		return -ENOMEM;

	return 0;
}

/*
 * Old kernels may not have this; keep the fallback below if you want to
 * port this code to test it on older kernels.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) #ifdef get_kmod_umh_limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static unsigned int kmod_init_test_thread_limit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return get_kmod_umh_limit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static unsigned int kmod_init_test_thread_limit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return TEST_START_NUM_THREADS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static int __kmod_config_init(struct kmod_test_device *test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) int ret = -ENOMEM, copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) __kmod_config_free(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) strlen(TEST_START_DRIVER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (copied != strlen(TEST_START_DRIVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) copied = config_copy_test_fs(config, TEST_START_TEST_FS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) strlen(TEST_START_TEST_FS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (copied != strlen(TEST_START_TEST_FS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) config->num_threads = kmod_init_test_thread_limit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) config->test_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) config->test_case = TEST_START_TEST_CASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ret = kmod_config_sync_info(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) test_dev->test_is_oom = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) test_dev->test_is_oom = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) WARN_ON(test_dev->test_is_oom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) __kmod_config_free(config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
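/*
 * Writing anything to the "reset" sysfs attribute restores the default
 * configuration. Both the trigger and config mutexes are taken so that a
 * reset cannot race with a running test or a concurrent config update.
 */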
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static ssize_t reset_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mutex_lock(&test_dev->trigger_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ret = __kmod_config_init(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dev_err(dev, "could not alloc settings for config trigger: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) dev_info(dev, "reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) mutex_unlock(&test_dev->trigger_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static DEVICE_ATTR_WO(reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
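/*
 * Parse an unsigned int from a sysfs write and update *config under the
 * config_mutex, then call test_sync() so any state derived from the value
 * (such as the per-thread info array) is kept consistent. If the sync
 * fails, the old value is restored and re-synced before returning -EINVAL.
 */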
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) const char *buf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unsigned int *config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int (*test_sync)(struct kmod_test_device *test_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) unsigned long new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) unsigned int old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ret = kstrtoul(buf, 10, &new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (new > UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) old_val = *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *config = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = test_sync(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) *config = old_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ret = test_sync(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Always return full write size even if we didn't consume all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
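/*
 * Like test_dev_config_update_uint_sync() but without a sync callback;
 * the new value is accepted only if it lies within [min, max].
 */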
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) const char *buf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned int *config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) unsigned int min,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) unsigned int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) unsigned long new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = kstrtoul(buf, 10, &new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (new < min || new > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) *config = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* Always return full write size even if we didn't consume all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
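/*
 * Signed variant: parse an int from a sysfs write and store it under the
 * config_mutex, rejecting values outside the range of int.
 */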
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static int test_dev_config_update_int(struct kmod_test_device *test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) const char *buf, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) long new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ret = kstrtol(buf, 10, &new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (new < INT_MIN || new > INT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *config = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Always return full write size even if we didn't consume all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
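/*
 * The show helpers below snapshot a single config value under the
 * config_mutex and format it for sysfs.
 */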
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) val = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return snprintf(buf, PAGE_SIZE, "%d\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) unsigned int config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) val = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return snprintf(buf, PAGE_SIZE, "%u\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static ssize_t test_result_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return test_dev_config_update_int(test_dev, buf, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) &config->test_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static ssize_t config_num_threads_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return test_dev_config_update_uint_sync(test_dev, buf, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) &config->num_threads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) kmod_config_sync_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static ssize_t config_num_threads_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return test_dev_config_show_int(test_dev, buf, config->num_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static DEVICE_ATTR_RW(config_num_threads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static ssize_t config_test_case_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) return test_dev_config_update_uint_range(test_dev, buf, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) &config->test_case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) __TEST_KMOD_INVALID + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __TEST_KMOD_MAX - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static ssize_t config_test_case_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return test_dev_config_show_uint(test_dev, buf, config->test_case);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static DEVICE_ATTR_RW(config_test_case);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static ssize_t test_result_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct kmod_test_device *test_dev = dev_to_test_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct test_config *config = &test_dev->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return test_dev_config_show_int(test_dev, buf, config->test_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static DEVICE_ATTR_RW(test_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
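/* sysfs attributes exposed by each test_kmod%d misc device */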
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) #define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static struct attribute *test_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) TEST_KMOD_DEV_ATTR(trigger_config),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) TEST_KMOD_DEV_ATTR(config),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) TEST_KMOD_DEV_ATTR(reset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) TEST_KMOD_DEV_ATTR(config_test_driver),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) TEST_KMOD_DEV_ATTR(config_test_fs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) TEST_KMOD_DEV_ATTR(config_num_threads),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) TEST_KMOD_DEV_ATTR(config_test_case),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) TEST_KMOD_DEV_ATTR(test_result),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ATTRIBUTE_GROUPS(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static int kmod_config_init(struct kmod_test_device *test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ret = __kmod_config_init(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
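/*
 * Allocate and initialize one test device: set up its mutexes, the
 * kthreads_done completion, the default configuration and the misc
 * device name. Registering the misc device is left to the caller.
 */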
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static struct kmod_test_device *alloc_test_dev_kmod(int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct kmod_test_device *test_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct miscdevice *misc_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) test_dev = vzalloc(sizeof(struct kmod_test_device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (!test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) mutex_init(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) mutex_init(&test_dev->trigger_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) mutex_init(&test_dev->thread_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) init_completion(&test_dev->kthreads_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ret = kmod_config_init(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) pr_err("Cannot alloc kmod_config_init()\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) goto err_out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) test_dev->dev_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) misc_dev = &test_dev->misc_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) misc_dev->minor = MISC_DYNAMIC_MINOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (!misc_dev->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) pr_err("Cannot alloc misc_dev->name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) goto err_out_free_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) misc_dev->groups = test_dev_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return test_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) err_out_free_config:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) free_test_dev_info(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) kmod_config_free(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) err_out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) vfree(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) test_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void free_test_dev_kmod(struct kmod_test_device *test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (test_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) kfree_const(test_dev->misc_dev.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) test_dev->misc_dev.name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) free_test_dev_info(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) kmod_config_free(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) vfree(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) test_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
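/*
 * Allocate a new test device, register its misc device and add it to the
 * global list of test devices, all under the reg_dev_mutex.
 */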
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static struct kmod_test_device *register_test_dev_kmod(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct kmod_test_device *test_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mutex_lock(&reg_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* int should suffice for number of devices, test for wrap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (num_test_devs + 1 == INT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) pr_err("reached limit of number of test devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) test_dev = alloc_test_dev_kmod(num_test_devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (!test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ret = misc_register(&test_dev->misc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) pr_err("could not register misc device: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) free_test_dev_kmod(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) test_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) test_dev->dev = test_dev->misc_dev.this_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) list_add_tail(&test_dev->list, &reg_test_devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dev_info(test_dev->dev, "interface ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) num_test_devs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) mutex_unlock(&reg_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return test_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static int __init test_kmod_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct kmod_test_device *test_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) test_dev = register_test_dev_kmod();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!test_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pr_err("Cannot add first test kmod device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * With some work we might be able to gracefully enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * testing with this driver built-in; for now that seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * rather risky. For those willing to try, have at it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * enable force_init_test below. Good luck! If that works,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * try lowering the init level for more fun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (force_init_test) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ret = trigger_config_run_type(test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) TEST_KMOD_DRIVER, "tun");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (WARN_ON(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) ret = trigger_config_run_type(test_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) TEST_KMOD_FS_TYPE, "btrfs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (WARN_ON(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) late_initcall(test_kmod_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
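/*
 * Tear down one test device: stop any in-flight test threads, remove the
 * misc device and free all memory associated with the device.
 */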
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) mutex_lock(&test_dev->trigger_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) mutex_lock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) test_dev_kmod_stop_tests(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) dev_info(test_dev->dev, "removing interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) misc_deregister(&test_dev->misc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) mutex_unlock(&test_dev->config_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) mutex_unlock(&test_dev->trigger_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) free_test_dev_kmod(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static void __exit test_kmod_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct kmod_test_device *test_dev, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mutex_lock(&reg_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) list_del(&test_dev->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) unregister_test_dev_kmod(test_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) mutex_unlock(&reg_dev_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) module_exit(test_kmod_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) MODULE_LICENSE("GPL");