// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
static DECLARE_FAULT_ATTR(null_init_hctx_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
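/*
 * Worked example (illustrative only): with TICKS_PER_SEC = 50, one tick is
 * 20 ms, and mb_per_tick(100) = (1 << 20) / 50 * 100 = 2,097,100 bytes,
 * i.e. roughly 2 MiB of bandwidth budget per tick for an mbps limit of 100.
 */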

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};
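/*
 * These flags are bit numbers in nullb_device->flags and are used with the
 * regular atomic bitops, e.g. (as done later in this file):
 *
 *	set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
 *	if (test_bit(NULLB_DEV_FL_UP, &dev->flags))
 *		...
 */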

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, sector 8
 *		maps to bit (8 & SECTOR_MASK) = 0, the first bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space().
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)
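/*
 * Illustrative mapping (see null_free_sector() and __null_lookup_page()
 * below): a 512-byte sector is located by splitting the sector number into
 * a radix tree index and a bit inside the page bitmap:
 *
 *	idx        = sector >> PAGE_SECTORS_SHIFT;   // which nullb_page
 *	sector_bit = sector & SECTOR_MASK;           // which bit in ->bitmap
 */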

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
/*
 * For more details about fault injection, please refer to
 * Documentation/fault-injection/fault-injection.rst.
 */
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");

static char g_init_hctx_str[80];
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif
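/*
 * Example (illustrative sketch, not a recommendation): loading the module
 * with timeout=1,50,0,-1 would configure the timeout fault with the four
 * fields named above, i.e. interval 1, probability 50%, no space filter and
 * an unlimited number of occurrences; see
 * Documentation/fault-injection/fault-injection.rst for the exact semantics
 * of each field.
 */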

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
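/*
 * A typical way to exercise these module parameters (sketch; values are
 * arbitrary) is something like:
 *
 *	modprobe null_blk queue_mode=2 nr_devices=2 gb=4 bs=4096 irqmode=1
 *
 * All of the parameters defined in this file are read-only (0444) once the
 * module is loaded; defaults are given in their MODULE_PARM_DESC() strings.
 */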

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static unsigned int nr_devices = 1;
module_param(nr_devices, uint, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static bool g_shared_tag_bitmap;
module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");

static unsigned long g_zone_capacity;
module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

static unsigned int g_zone_max_open;
module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");

static unsigned int g_zone_max_active;
module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");

static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY)				\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
	struct nullb_device *dev = to_nullb_device(item);		\
	TYPE new_value = 0;						\
	int ret;							\
									\
	ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
	if (ret < 0)							\
		return ret;						\
	if (apply_fn)							\
		ret = apply_fn(dev, new_value);				\
	else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) 	\
		ret = -EBUSY;						\
	if (ret < 0)							\
		return ret;						\
	dev->NAME = new_value;						\
	return count;							\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

static int nullb_apply_submit_queues(struct nullb_device *dev,
				     unsigned int submit_queues)
{
	struct nullb *nullb = dev->nullb;
	struct blk_mq_tag_set *set;

	if (!nullb)
		return 0;

	/*
	 * Make sure that null_init_hctx() does not access nullb->queues[] past
	 * the end of that array.
	 */
	if (submit_queues > nr_cpu_ids)
		return -EINVAL;
	set = nullb->tag_set;
	blk_mq_update_nr_hw_queues(set, submit_queues);
	return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
}

NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
NULLB_DEVICE_ATTR(irqmode, uint, NULL);
NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
NULLB_DEVICE_ATTR(index, uint, NULL);
NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
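/*
 * Each NULLB_DEVICE_ATTR() above becomes a configfs attribute of a nullb
 * device item. As a sketch (assuming configfs is mounted at the usual
 * /sys/kernel/config), a device could be shaped before powering it on with:
 *
 *	mkdir /sys/kernel/config/nullb/nullb0
 *	echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *	echo 1024 > /sys/kernel/config/nullb/nullb0/size
 *
 * Attributes without an apply function reject writes with -EBUSY once
 * NULLB_DEV_FL_CONFIGURED is set.
 */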

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);
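/*
 * In other words (sketch, path assumes the usual configfs mount point):
 *
 *	echo 1 > /sys/kernel/config/nullb/nullb0/power	# bring the device up
 *	echo 0 > /sys/kernel/config/nullb/nullb0/power	# tear it down again
 *
 * Powering on calls null_add_dev() and marks the device CONFIGURED; powering
 * off takes the global lock and calls null_del_dev().
 */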

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);
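/*
 * The accepted badblocks format follows directly from the parser above:
 * "+<start>-<end>" marks a sector range bad and "-<start>-<end>" clears it,
 * e.g. (illustrative):
 *
 *	echo "+0-1023" > /sys/kernel/config/nullb/nullb0/badblocks
 *	echo "-0-1023" > /sys/kernel/config/nullb/nullb0/badblocks
 */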

static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_capacity,
	&nullb_device_attr_zone_nr_conv,
	&nullb_device_attr_zone_max_open,
	&nullb_device_attr_zone_max_active,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE,
			"memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_capacity = g_zone_capacity;
	dev->zone_nr_conv = g_zone_nr_conv;
	dev->zone_max_open = g_zone_max_open;
	dev->zone_max_active = g_zone_max_active;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_free_zoned_dev(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

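/*
 * Tag allocation for the command slots in a nullb_queue: scan for a clear
 * bit and claim it with an atomic test-and-set. If another CPU wins the race
 * for the same bit, the loop simply rescans; -1U means the queue is full.
 */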
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->error = BLK_STS_OK;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

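/*
 * A page is "empty" when none of its data bits are set; the scan below
 * deliberately stops at MAP_SZ - 2 so that the NULLB_PAGE_LOCK and
 * NULLB_PAGE_FREE status bits do not count as data.
 */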
static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

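/*
 * Insert t_page at idx in either the data or the cache radix tree. If an
 * entry already exists (radix_tree_insert() fails), the freshly allocated
 * page is dropped and the existing entry is returned instead, so callers
 * always get back the page that actually lives in the tree.
 */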
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct nullb_page *t_page, bool is_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct radix_tree_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (radix_tree_insert(root, idx, t_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) null_free_page(t_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) t_page = radix_tree_lookup(root, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) WARN_ON(!t_page || t_page->page->index != idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) } else if (is_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) nullb->dev->curr_cache += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
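/*
 * Free every page in the device's data or cache tree, walking the tree
 * in batches of FREE_BATCH entries.
 */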
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) unsigned long pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct nullb_page *ret, *t_pages[FREE_BATCH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct radix_tree_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) root = is_cache ? &dev->cache : &dev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) nr_pages = radix_tree_gang_lookup(root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) (void **)t_pages, pos, FREE_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) pos = t_pages[i]->page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ret = radix_tree_delete_item(root, pos, t_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) WARN_ON(ret != t_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) null_free_page(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) pos++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) } while (nr_pages == FREE_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (is_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) dev->curr_cache = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
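/*
 * Look up the page backing @sector in a single tree. For reads
 * (@for_write == false) the page is only returned if the sector's bit
 * is actually set.
 */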
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static struct nullb_page *__null_lookup_page(struct nullb *nullb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) sector_t sector, bool for_write, bool is_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned int sector_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u64 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct nullb_page *t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct radix_tree_root *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) idx = sector >> PAGE_SECTORS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) sector_bit = (sector & SECTOR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) t_page = radix_tree_lookup(root, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) WARN_ON(t_page && t_page->page->index != idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static struct nullb_page *null_lookup_page(struct nullb *nullb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sector_t sector, bool for_write, bool ignore_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct nullb_page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (!ignore_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) page = __null_lookup_page(nullb, sector, for_write, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return __null_lookup_page(nullb, sector, for_write, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
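/*
 * Return the page backing @sector, allocating and inserting one if it
 * does not exist yet. nullb->lock is dropped and re-acquired around the
 * allocation, hence the __releases/__acquires annotations.
 */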
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static struct nullb_page *null_insert_page(struct nullb *nullb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) sector_t sector, bool ignore_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) __releases(&nullb->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) __acquires(&nullb->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u64 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct nullb_page *t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) t_page = null_lookup_page(nullb, sector, true, ignore_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (t_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) t_page = null_alloc_page(GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (!t_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) goto out_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (radix_tree_preload(GFP_NOIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) goto out_freepage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) idx = sector >> PAGE_SECTORS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) t_page->page->index = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) radix_tree_preload_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) out_freepage:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) null_free_page(t_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) out_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return null_lookup_page(nullb, sector, true, ignore_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
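/*
 * Write the dirty sectors of a cache page into the corresponding data
 * page, then drop the cache page. A cache page already marked
 * NULLB_PAGE_FREE is simply discarded. Returns -ENOMEM if the data page
 * cannot be allocated.
 */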
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) u64 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct nullb_page *t_page, *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) void *dst, *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) idx = c_page->page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) null_free_page(c_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (t_page && null_page_empty(t_page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ret = radix_tree_delete_item(&nullb->dev->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) idx, t_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) null_free_page(t_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!t_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) src = kmap_atomic(c_page->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dst = kmap_atomic(t_page->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) for (i = 0; i < PAGE_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (test_bit(i, c_page->bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) offset = (i << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) memcpy(dst + offset, src + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) nullb->dev->blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) __set_bit(i, t_page->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) null_free_page(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) nullb->dev->curr_cache -= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
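/*
 * Flush cache pages until at least @n more bytes fit under the
 * configured cache size, or the cache is empty. Called with nullb->lock
 * held.
 */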
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int null_make_cache_space(struct nullb *nullb, unsigned long n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int i, err, nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct nullb_page *c_pages[FREE_BATCH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) unsigned long flushed = 0, one_round;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if ((nullb->dev->cache_size * 1024 * 1024) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * null_flush_cache_page() may drop nullb->lock before it is done with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * c_pages. To avoid a race, lock the pages so they cannot be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) nullb->cache_flush_pos = c_pages[i]->page->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * The page is already being flushed to disk by another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * thread; skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) c_pages[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) one_round = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (c_pages[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) err = null_flush_cache_page(nullb, c_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) one_round++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) flushed += one_round << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (n > flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (nr_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) nullb->cache_flush_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (one_round == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /* give other threads a chance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
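/*
 * Copy @n bytes from @source into the device, one block at a time. FUA
 * writes go straight to the data tree and invalidate any cached copy of
 * the sector.
 */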
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static int copy_to_nullb(struct nullb *nullb, struct page *source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) unsigned int off, sector_t sector, size_t n, bool is_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) size_t temp, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct nullb_page *t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) void *dst, *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) while (count < n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) temp = min_t(size_t, nullb->dev->blocksize, n - count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (null_cache_active(nullb) && !is_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) null_make_cache_space(nullb, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) t_page = null_insert_page(nullb, sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) !null_cache_active(nullb) || is_fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!t_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) src = kmap_atomic(source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dst = kmap_atomic(t_page->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) memcpy(dst + offset, src + off + count, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) __set_bit(sector & SECTOR_MASK, t_page->bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (is_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) null_free_sector(nullb, sector, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) count += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) sector += temp >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
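/*
 * Copy @n bytes from the device into @dest. Sectors that were never
 * written read back as zeroes.
 */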
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static int copy_from_nullb(struct nullb *nullb, struct page *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned int off, sector_t sector, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) size_t temp, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) struct nullb_page *t_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) void *dst, *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) while (count < n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) temp = min_t(size_t, nullb->dev->blocksize, n - count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) t_page = null_lookup_page(nullb, sector, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) !null_cache_active(nullb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dst = kmap_atomic(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!t_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) memset(dst + off + count, 0, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) src = kmap_atomic(t_page->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) memcpy(dst + off + count, src + offset, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) count += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sector += temp >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) unsigned int len, unsigned int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) void *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dst = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) memset(dst + off, 0xFF, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) kunmap_atomic(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
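/*
 * Discard @n bytes starting at @sector by dropping the backing sectors
 * from the data tree and, if the cache is active, from the cache tree
 * as well.
 */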
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) size_t temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) while (n > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) temp = min_t(size_t, n, nullb->dev->blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) null_free_sector(nullb, sector, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (null_cache_active(nullb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) null_free_sector(nullb, sector, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) sector += temp >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) n -= temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
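/* Write the entire cache back into the data tree. */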
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static int null_handle_flush(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!null_cache_active(nullb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) err = null_make_cache_space(nullb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) nullb->dev->cache_size * 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (err || nullb->dev->curr_cache == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) WARN_ON(!radix_tree_empty(&nullb->dev->cache));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static int null_transfer(struct nullb *nullb, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned int len, unsigned int off, bool is_write, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) bool is_fua)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct nullb_device *dev = nullb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned int valid_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (dev->zoned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) valid_len = null_zone_valid_read_len(nullb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) sector, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (valid_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) err = copy_from_nullb(nullb, page, off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) sector, valid_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) off += valid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) len -= valid_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) nullb_fill_pattern(nullb, page, len, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static int null_handle_rq(struct nullb_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct request *rq = cmd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct nullb *nullb = cmd->nq->dev->nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) sector = blk_rq_pos(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (req_op(rq) == REQ_OP_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) null_handle_discard(nullb, sector, blk_rq_bytes(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) rq_for_each_segment(bvec, rq, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) len = bvec.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) op_is_write(req_op(rq)), sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) rq->cmd_flags & REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) sector += len >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static int null_handle_bio(struct nullb_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) struct bio *bio = cmd->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct nullb *nullb = cmd->nq->dev->nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) sector_t sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct bio_vec bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct bvec_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (bio_op(bio) == REQ_OP_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) null_handle_discard(nullb, sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) bio_sectors(bio) << SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) spin_lock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) bio_for_each_segment(bvec, bio, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) len = bvec.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) op_is_write(bio_op(bio)), sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) bio->bi_opf & REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) sector += len >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) spin_unlock_irq(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static void null_stop_queue(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct request_queue *q = nullb->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (nullb->dev->queue_mode == NULL_Q_MQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) blk_mq_stop_hw_queues(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void null_restart_queue_async(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct request_queue *q = nullb->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (nullb->dev->queue_mode == NULL_Q_MQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) blk_mq_start_stopped_hw_queues(q, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
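/*
 * Charge the request against the per-tick bandwidth budget. If the
 * budget is exhausted, stop the hardware queues and return
 * BLK_STS_DEV_RESOURCE so the request is requeued.
 */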
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct nullb_device *dev = cmd->nq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct nullb *nullb = dev->nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) blk_status_t sts = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct request *rq = cmd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!hrtimer_active(&nullb->bw_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) hrtimer_restart(&nullb->bw_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) null_stop_queue(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /* the bw timer may have refilled the budget; restart the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (atomic_long_read(&nullb->cur_bytes) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) null_restart_queue_async(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* requeue request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) sts = BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) sector_t nr_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct badblocks *bb = &cmd->nq->dev->badblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) sector_t first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int bad_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) enum req_opf op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct nullb_device *dev = cmd->nq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (dev->queue_mode == NULL_Q_BIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) err = null_handle_bio(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) err = null_handle_rq(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return errno_to_blk_status(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct nullb_device *dev = cmd->nq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (dev->memory_backed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) zero_fill_bio(cmd->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } else if (req_op(cmd->rq) == REQ_OP_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) __rq_for_each_bio(bio, cmd->rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) zero_fill_bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * Since root privileges are required to configure the null_blk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * driver, it is fine that this driver does not initialize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * data buffers of read commands. Zero-initialize these buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * anyway if KMSAN is enabled, to prevent KMSAN from complaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * that null_blk leaves read data buffers uninitialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) if (IS_ENABLED(CONFIG_KMSAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) nullb_zero_read_cmd_buffer(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /* Complete IO by inline, softirq or timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) switch (cmd->nq->dev->irqmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) case NULL_IRQ_SOFTIRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) switch (cmd->nq->dev->queue_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) case NULL_Q_MQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (likely(!blk_should_fake_timeout(cmd->rq->q)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) blk_mq_complete_request(cmd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) case NULL_Q_BIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * XXX: no proper submitting CPU information is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) end_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case NULL_IRQ_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) end_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case NULL_IRQ_TIMER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) null_cmd_end_timer(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) blk_status_t null_process_cmd(struct nullb_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) enum req_opf op, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) unsigned int nr_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct nullb_device *dev = cmd->nq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (dev->badblocks.shift != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ret = null_handle_badblocks(cmd, sector, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (dev->memory_backed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return null_handle_memory_backed(cmd, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) sector_t nr_sectors, enum req_opf op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct nullb_device *dev = cmd->nq->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct nullb *nullb = dev->nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) blk_status_t sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) sts = null_handle_throttled(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (sts != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (op == REQ_OP_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) cmd->error = errno_to_blk_status(null_handle_flush(nullb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (dev->zoned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) sts = null_process_cmd(cmd, op, sector, nr_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* Do not overwrite errors (e.g. timeout errors) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (cmd->error == BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) cmd->error = sts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) nullb_complete_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
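/*
 * Bandwidth throttling timer: refill the per-tick byte budget and
 * restart any stopped queues. If nothing was consumed during the last
 * tick, let the timer lapse; null_handle_throttled() restarts it when
 * I/O resumes.
 */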
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) unsigned int mbps = nullb->dev->mbps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return HRTIMER_NORESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) null_restart_queue_async(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) hrtimer_forward_now(&nullb->bw_timer, timer_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return HRTIMER_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void nullb_setup_bwtimer(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) nullb->bw_timer.function = nullb_bwtimer_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
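/*
 * Map the submitting CPU to one of the per-device queues (used by the
 * bio-based queue mode).
 */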
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) int index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (nullb->nr_queues != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return &nullb->queues[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static blk_qc_t null_submit_bio(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) sector_t sector = bio->bi_iter.bi_sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) sector_t nr_sectors = bio_sectors(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct nullb *nullb = bio->bi_disk->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct nullb_queue *nq = nullb_to_queue(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct nullb_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) cmd = alloc_cmd(nq, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) cmd->bio = bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return BLK_QC_T_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static bool should_timeout_request(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (g_timeout_str[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return should_fail(&null_timeout_attr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static bool should_requeue_request(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (g_requeue_str[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return should_fail(&null_requeue_attr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) pr_info("rq %p timed out\n", rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * If the device is marked as blocking (i.e. memory backed or zoned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * device), the submission path may be blocked waiting for resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * and cause real timeouts. For these real timeouts, the submission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * path will complete the request using blk_mq_complete_request().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Only fake timeouts need to execute blk_mq_complete_request() here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) cmd->error = BLK_STS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (cmd->fake_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) blk_mq_complete_request(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return BLK_EH_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct nullb_queue *nq = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sector_t nr_sectors = blk_rq_sectors(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) sector_t sector = blk_rq_pos(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (nq->dev->irqmode == NULL_IRQ_TIMER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) cmd->timer.function = null_cmd_timer_expired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) cmd->rq = bd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) cmd->error = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) cmd->nq = nq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) cmd->fake_timeout = should_timeout_request(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) blk_mq_start_request(bd->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (should_requeue_request(bd->rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * Alternate between hitting the core BUSY path and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * driver-driven requeue path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) nq->requeue_selection++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (nq->requeue_selection & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) blk_mq_requeue_request(bd->rq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (cmd->fake_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static void cleanup_queue(struct nullb_queue *nq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) kfree(nq->tag_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) kfree(nq->cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static void cleanup_queues(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) for (i = 0; i < nullb->nr_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) cleanup_queue(&nullb->queues[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) kfree(nullb->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct nullb_queue *nq = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct nullb *nullb = nq->dev->nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) nullb->nr_queues--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) init_waitqueue_head(&nq->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) nq->queue_depth = nullb->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) nq->dev = nullb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct nullb *nullb = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct nullb_queue *nq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) nq = &nullb->queues[hctx_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) hctx->driver_data = nq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) null_init_queue(nullb, nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) nullb->nr_queues++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static const struct blk_mq_ops null_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) .queue_rq = null_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) .complete = null_complete_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .timeout = null_timeout_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .init_hctx = null_init_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .exit_hctx = null_exit_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static void null_del_dev(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct nullb_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (!nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dev = nullb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) ida_simple_remove(&nullb_indexes, nullb->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) list_del_init(&nullb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) del_gendisk(nullb->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) hrtimer_cancel(&nullb->bw_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) atomic_long_set(&nullb->cur_bytes, LONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) null_restart_queue_async(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) blk_cleanup_queue(nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (dev->queue_mode == NULL_Q_MQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) nullb->tag_set == &nullb->__tag_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) blk_mq_free_tag_set(nullb->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) put_disk(nullb->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) cleanup_queues(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (null_cache_active(nullb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) null_free_device_storage(nullb->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) kfree(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) dev->nullb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static void null_config_discard(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!nullb->dev->discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (nullb->dev->zoned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) nullb->dev->discard = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) pr_info("discard option is ignored in zoned mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) nullb->q->limits.discard_granularity = nullb->dev->blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) nullb->q->limits.discard_alignment = nullb->dev->blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
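/*
 * Two sets of block_device_operations: bio-based devices supply their own
 * ->submit_bio entry point, while blk-mq devices are driven through the
 * tag set bound to null_mq_ops. Both variants can report zones.
 */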
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) static const struct block_device_operations null_bio_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) .submit_bio = null_submit_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) .report_zones = null_report_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static const struct block_device_operations null_rq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) .report_zones = null_report_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
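/*
 * Allocate the per-queue command array and tag bitmap used by the bio-based
 * path. Every command starts out free (tag == -1U).
 */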
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static int setup_commands(struct nullb_queue *nq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct nullb_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) int i, tag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (!nq->cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (!nq->tag_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) kfree(nq->cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) for (i = 0; i < nq->queue_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) cmd = &nq->cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) cmd->tag = -1U;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
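/*
 * Allocate one nullb_queue slot per possible CPU. The individual queues are
 * initialized later: by init_driver_queues() in bio mode, or as blk-mq
 * hardware contexts are set up in mq mode.
 */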
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static int setup_queues(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (!nullb->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) nullb->queue_depth = nullb->dev->hw_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
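/*
 * For bio-based devices: initialize each submit queue and its command/tag
 * storage. nr_queues counts how many were set up, so cleanup_queues() can
 * unwind a partially initialized device.
 */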
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static int init_driver_queues(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct nullb_queue *nq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) for (i = 0; i < nullb->dev->submit_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) nq = &nullb->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) null_init_queue(nullb, nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) ret = setup_commands(nq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) nullb->nr_queues++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
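/*
 * Allocate and register the gendisk. The capacity is dev->size megabytes
 * expressed in 512-byte sectors, and zoned devices are registered with the
 * zoned block layer before the disk is added.
 */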
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static int null_gendisk_register(struct nullb *nullb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct gendisk *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) set_capacity(disk, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) disk->major = null_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) disk->first_minor = nullb->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (queue_is_mq(nullb->q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) disk->fops = &null_rq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) disk->fops = &null_bio_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) disk->private_data = nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) disk->queue = nullb->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (nullb->dev->zoned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int ret = null_register_zoned_dev(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) add_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
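/*
 * Fill in and allocate a blk-mq tag set. With a nullb device the per-device
 * attributes are used; for the module-wide shared tag_set (nullb == NULL)
 * the global module parameters apply instead.
 */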
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) set->ops = &null_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) g_submit_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) g_hw_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) set->cmd_size = sizeof(struct nullb_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) set->flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (g_no_sched)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) set->flags |= BLK_MQ_F_NO_SCHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (g_shared_tag_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) set->driver_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if ((nullb && nullb->dev->blocking) || g_blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) set->flags |= BLK_MQ_F_BLOCKING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return blk_mq_alloc_tag_set(set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
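/*
 * Sanity-check and clamp a device configuration before bringing it up:
 * block size is limited to 512..4096 bytes, submit_queues is bounded,
 * memory backing forces blocking mode, throttling is disabled for bio-based
 * devices, and zoned devices must use a power-of-two zone size.
 */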
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static int null_validate_conf(struct nullb_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) dev->blocksize = round_down(dev->blocksize, 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (dev->submit_queues != nr_online_nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dev->submit_queues = nr_online_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) } else if (dev->submit_queues > nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dev->submit_queues = nr_cpu_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) else if (dev->submit_queues == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) dev->submit_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
	/* Memory backing allocates pages in the I/O path, so force blocking mode */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless without memory backing */
		dev->cache_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) dev->cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* bio-based queues cannot be stopped; throttling is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (dev->queue_mode == NULL_Q_BIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) dev->mbps = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (dev->zoned &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) pr_err("zone_size must be power-of-two\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
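/*
 * Fault injection setup: parse the timeout, requeue and init_hctx module
 * parameters into their fault_attr structures. An empty string leaves the
 * corresponding fault injection disabled.
 */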
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static bool __null_setup_fault(struct fault_attr *attr, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (!str[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (!setup_fault_attr(attr, str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) attr->verbose = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) static bool null_setup_fault(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
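/*
 * Validate a device configuration and bring the device up: allocate the
 * nullb, set up the queues and (for blk-mq) the tag set, create the request
 * queue, arm bandwidth throttling and the write-back cache if requested,
 * initialize zoned state, allocate an index and register the gendisk.
 */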
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static int null_add_dev(struct nullb_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) struct nullb *nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) rv = null_validate_conf(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (!nullb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) nullb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) dev->nullb = nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) spin_lock_init(&nullb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) rv = setup_queues(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) goto out_free_nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (dev->queue_mode == NULL_Q_MQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (shared_tags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) nullb->tag_set = &tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) rv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) nullb->tag_set = &nullb->__tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) rv = null_init_tag_set(nullb, nullb->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) goto out_cleanup_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
		if (!null_setup_fault()) {
			rv = -EINVAL;	/* don't report success on fault setup failure */
			goto out_cleanup_queues;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) nullb->tag_set->timeout = 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (IS_ERR(nullb->q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) goto out_cleanup_tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) } else if (dev->queue_mode == NULL_Q_BIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) nullb->q = blk_alloc_queue(dev->home_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (!nullb->q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) rv = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) goto out_cleanup_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) rv = init_driver_queues(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) goto out_cleanup_blk_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (dev->mbps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) nullb_setup_bwtimer(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (dev->cache_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) blk_queue_write_cache(nullb->q, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (dev->zoned) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) rv = null_init_zoned_dev(dev, nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) goto out_cleanup_blk_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) nullb->q->queuedata = nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
	mutex_lock(&lock);
	rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	mutex_unlock(&lock);
	if (rv < 0)
		goto out_cleanup_zone;
	nullb->index = rv;
	dev->index = nullb->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) blk_queue_logical_block_size(nullb->q, dev->blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) blk_queue_physical_block_size(nullb->q, dev->blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) null_config_discard(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) sprintf(nullb->disk_name, "nullb%d", nullb->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) rv = null_gendisk_register(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (rv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) goto out_cleanup_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) mutex_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) list_add_tail(&nullb->list, &nullb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) mutex_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) out_cleanup_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) null_free_zoned_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) out_cleanup_blk_queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) blk_cleanup_queue(nullb->q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) out_cleanup_tags:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) blk_mq_free_tag_set(nullb->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) out_cleanup_queues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) cleanup_queues(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) out_free_nullb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) kfree(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) dev->nullb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
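/*
 * Module init: validate the global parameters, register the configfs
 * subsystem and the "nullb" block major, then create nr_devices devices
 * with the default configuration. On failure everything created so far is
 * torn down again.
 */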
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static int __init null_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct nullb *nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) struct nullb_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (g_bs > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) pr_warn("invalid block size\n");
		pr_warn("defaulting block size to %lu\n", PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) g_bs = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) pr_err("invalid home_node value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) g_home_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (g_queue_mode == NULL_Q_RQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) pr_err("legacy IO path no longer available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (g_submit_queues != nr_online_nodes) {
			pr_warn("forcing submit_queues to %u (nr_online_nodes)\n",
							nr_online_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) g_submit_queues = nr_online_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) } else if (g_submit_queues > nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) g_submit_queues = nr_cpu_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) else if (g_submit_queues <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) g_submit_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (g_queue_mode == NULL_Q_MQ && shared_tags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) ret = null_init_tag_set(NULL, &tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) config_group_init(&nullb_subsys.su_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) mutex_init(&nullb_subsys.su_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ret = configfs_register_subsystem(&nullb_subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) goto err_tagset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) mutex_init(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) null_major = register_blkdev(0, "nullb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (null_major < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) ret = null_major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) goto err_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) for (i = 0; i < nr_devices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dev = null_alloc_dev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ret = null_add_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) null_free_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) pr_info("module loaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) err_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) while (!list_empty(&nullb_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) nullb = list_entry(nullb_list.next, struct nullb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) dev = nullb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) null_del_dev(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) null_free_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) unregister_blkdev(null_major, "nullb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) err_conf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) configfs_unregister_subsystem(&nullb_subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) err_tagset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (g_queue_mode == NULL_Q_MQ && shared_tags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) blk_mq_free_tag_set(&tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
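/*
 * Module exit: unregister configfs and the block major first so no new
 * devices can appear, then delete and free every remaining device and,
 * if one was used, the shared tag set.
 */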
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) static void __exit null_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) struct nullb *nullb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) configfs_unregister_subsystem(&nullb_subsys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) unregister_blkdev(null_major, "nullb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) mutex_lock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) while (!list_empty(&nullb_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct nullb_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) nullb = list_entry(nullb_list.next, struct nullb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) dev = nullb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) null_del_dev(nullb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) null_free_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) mutex_unlock(&lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (g_queue_mode == NULL_Q_MQ && shared_tags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) blk_mq_free_tag_set(&tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) module_init(null_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) module_exit(null_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) MODULE_LICENSE("GPL");