// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

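/*
 * Helpers shared by the attribute handlers below.  queue_var_show() formats
 * a value for a read of /sys/block/<disk>/queue/<attr>, and queue_var_store()
 * parses the decimal string written to such a file (e.g.
 * "echo 256 > /sys/block/sda/queue/nr_requests").  queue_var_store() returns
 * the consumed byte count on success so the individual store handlers can
 * pass it straight back to sysfs; queue_var_store64() returns 0 instead.
 */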
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}

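/*
 * read_ahead_kb is backed by backing_dev_info->ra_pages, a page count, so
 * the handlers shift by (PAGE_SHIFT - 10) to convert between pages and KiB.
 * With 4 KiB pages the shift is 2, e.g. 32 pages <=> 128 KiB.
 */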
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}

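/*
 * The size limits below are kept in 512-byte sectors inside struct
 * queue_limits; ">> 1" converts sectors to KiB for the *_kb attributes
 * (e.g. a 2560-sector limit is shown as 1280 KiB).
 */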
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

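/*
 * discard_max_bytes is written in bytes, e.g.
 *
 *      echo 67108864 > /sys/block/sda/queue/discard_max_bytes
 *
 * The value must be aligned to the discard granularity; it is converted to
 * 512-byte sectors (>> 9) and capped at the hardware limit exposed through
 * discard_max_hw_bytes.
 */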
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

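/*
 * discard_zeroes_data is obsolete: the block layer no longer guarantees
 * zeroing on discard, so the attribute is kept only for ABI compatibility
 * and always reports 0.
 */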
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;

        return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

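/*
 * max_sectors_kb must lie between one page worth of data and the hardware
 * limit, i.e. PAGE_SIZE / 1024 <= value <= min(max_hw_sectors_kb,
 * max_dev_sectors_kb).  The new soft limit is also mirrored into
 * backing_dev_info->io_pages (converted from KiB to pages) so that
 * readahead sizing can honour it.
 */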
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_##name##_show(struct request_queue *q, char *page)               \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}
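/*
 * QUEUE_SYSFS_BIT_FNS(name, flag, neg) expands to a queue_<name>_show() /
 * queue_<name>_store() pair exposing one boolean queue flag as a 0/1 sysfs
 * attribute.  "neg" inverts the exported sense: NONROT, for instance, is
 * presented to userspace as "rotational", so writing 1 to rotational clears
 * QUEUE_FLAG_NONROT.
 */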

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_active_zones(q), page);
}

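/*
 * "nomerges" is a three-valued knob: 0 allows all request merging, 1 only
 * disables the more expensive extended (hash lookup) merges, and 2 disables
 * merging entirely.  The show side rebuilds that encoding from the two flags
 * as (NOMERGES << 1) | NOXMERGES.
 */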
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}

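/*
 * rq_affinity: 0 places no restriction on the completion CPU, 1 completes a
 * request on a CPU in the same group as the submitter (QUEUE_FLAG_SAME_COMP),
 * and 2 forces completion onto the exact submitting CPU
 * (QUEUE_FLAG_SAME_FORCE).  The show side reports "set << force", i.e. 0, 1
 * or 2.
 */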
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}

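/*
 * io_poll_delay tunes how polled I/O waits for completion: writing -1
 * (BLK_MQ_POLL_CLASSIC) selects classic busy polling, while a non-negative
 * value enables hybrid polling, interpreted as microseconds and stored in
 * q->poll_nsec as nanoseconds (hence the *1000 and /1000 conversions).
 */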
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == BLK_MQ_POLL_CLASSIC)
                q->poll_nsec = BLK_MQ_POLL_CLASSIC;
        else if (val >= 0)
                q->poll_nsec = val * 1000;
        else
                return -EINVAL;

        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
            !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        if (poll_on)
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

        return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

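/*
 * wbt_lat_usec is the writeback throttling latency target in microseconds
 * (kept internally in nanoseconds).  Writing -1 restores the default for the
 * device type and, per the queue sysfs documentation, writing 0 disables
 * throttling.  Any change is applied with the queue frozen and quiesced, as
 * the comment in the store handler explains.
 */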
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)                          \
static struct queue_sysfs_entry _prefix##_entry = {             \
        .attr = { .name = _name, .mode = 0444 },                \
        .show = _prefix##_show,                                 \
};

#define QUEUE_RW_ENTRY(_prefix, _name)                          \
static struct queue_sysfs_entry _prefix##_entry = {             \
        .attr = { .name = _name, .mode = 0644 },                \
        .show = _prefix##_show,                                 \
        .store = _prefix##_store,                               \
};
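/*
 * QUEUE_RO_ENTRY()/QUEUE_RW_ENTRY() build the attribute descriptors that are
 * collected in queue_attrs[] below: QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb"),
 * for example, expands to a queue_ra_entry wired to queue_ra_show() and
 * queue_ra_store() with mode 0644.
 */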

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &elv_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_max_open_zones_entry.attr,
        &queue_max_active_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_stable_writes_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &blk_throtl_sample_time_entry.attr,
#endif
        NULL,
};

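/*
 * Hide attributes that make no sense for this particular queue: io_timeout
 * when the driver has no ->timeout handler, and the open/active zone limits
 * on non-zoned devices.
 */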
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        if (attr == &queue_io_timeout_entry.attr &&
                (!q->mq_ops || !q->mq_ops->timeout))
                return 0;

        if ((attr == &queue_max_open_zones_entry.attr ||
             attr == &queue_max_active_zones_entry.attr) &&
            !blk_queue_is_zoned(q))
                return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};


#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

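/*
 * Generic show/store dispatchers for blk_queue_ktype: they resolve the
 * queue_sysfs_entry wrapping the attribute and invoke its handler under
 * q->sysfs_lock, so the individual handlers do not run concurrently with
 * each other.
 */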
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) struct queue_sysfs_entry *entry = to_queue(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct request_queue *q =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) container_of(kobj, struct request_queue, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (!entry->show)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) mutex_lock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) res = entry->show(q, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) queue_attr_store(struct kobject *kobj, struct attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) const char *page, size_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct queue_sysfs_entry *entry = to_queue(attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (!entry->store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) q = container_of(kobj, struct request_queue, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) mutex_lock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) res = entry->store(q, page, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static void blk_free_queue_rcu(struct rcu_head *rcu_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct request_queue *q = container_of(rcu_head, struct request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) kmem_cache_free(blk_requestq_cachep, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static void blk_exit_queue(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * Since the I/O scheduler exit code may access cgroup information,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * perform I/O scheduler exit before disassociating from the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * cgroup controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (q->elevator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ioc_clear_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) __elevator_exit(q, q->elevator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * Remove all references to @q from the block cgroup controller before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * restoring @q->queue_lock to avoid that restoring this pointer causes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * e.g. blkcg_print_blkgs() to crash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) blkcg_exit_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * Since the cgroup code may dereference the @q->backing_dev_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * pointer, only decrease its reference count after having removed the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * association with the block cgroup controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) bdi_put(q->backing_dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * blk_release_queue - releases all allocated resources of the request_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * @kobj: pointer to a kobject, whose container is a request_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * This function releases all allocated resources of the request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * The struct request_queue refcount is incremented with blk_get_queue() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * decremented with blk_put_queue(). Once the refcount reaches 0 this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * For drivers that have a request_queue on a gendisk and added with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * __device_add_disk() the refcount to request_queue will reach 0 with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * the last put_disk() called by the driver. For drivers which don't use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * __device_add_disk() this happens with blk_cleanup_queue().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * Drivers exist which depend on the release of the request_queue to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * synchronous, it should not be deferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * Context: can sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static void blk_release_queue(struct kobject *kobj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct request_queue *q =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) container_of(kobj, struct request_queue, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) blk_stat_remove_callback(q, q->poll_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) blk_stat_free_callback(q->poll_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) blk_free_queue_stats(q->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (queue_is_mq(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct blk_mq_hw_ctx *hctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) cancel_delayed_work_sync(&q->requeue_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) queue_for_each_hw_ctx(q, hctx, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) cancel_delayed_work_sync(&hctx->run_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) blk_exit_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) blk_queue_free_zone_bitmaps(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (queue_is_mq(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) blk_mq_release(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) blk_trace_shutdown(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) mutex_lock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) debugfs_remove_recursive(q->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) mutex_unlock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (queue_is_mq(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) blk_mq_debugfs_unregister(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) bioset_exit(&q->bio_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ida_simple_remove(&blk_queue_ida, q->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) call_rcu(&q->rcu_head, blk_free_queue_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
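/*
 * Illustrative sketch only (not part of this file's logic): how a caller
 * might take and drop an extra request_queue reference so that the final
 * blk_put_queue() ends up in blk_release_queue() above. The error value
 * and the usage pattern are hypothetical; blk_get_queue() returns false
 * once the queue is dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;
 *	// ... use q while holding the reference ...
 *	blk_put_queue(q);	// the last put releases the queue
 */
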
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static const struct sysfs_ops queue_sysfs_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) .show = queue_attr_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) .store = queue_attr_store,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct kobj_type blk_queue_ktype = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) .sysfs_ops = &queue_sysfs_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) .release = blk_release_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) };
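
/*
 * Illustrative sketch only: how a read-only queue attribute could be wired
 * through queue_sysfs_ops via a struct queue_sysfs_entry. The "example"
 * attribute name and queue_example_show() are hypothetical, not attributes
 * this file actually exposes; queue_attr_show() resolves an attribute back
 * to its entry with container_of() and invokes ->show().
 *
 *	static ssize_t queue_example_show(struct request_queue *q, char *page)
 *	{
 *		return queue_var_show(q->nr_requests, page);
 *	}
 *
 *	static struct queue_sysfs_entry queue_example_entry = {
 *		.attr	= { .name = "example", .mode = 0444 },
 *		.show	= queue_example_show,
 *	};
 */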
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * blk_register_queue - register a block layer queue with sysfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * @disk: Disk whose request queue should be registered with sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) int blk_register_queue(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct device *dev = disk_to_dev(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct request_queue *q = disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (WARN_ON(!q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) WARN_ONCE(blk_queue_registered(q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) "%s is registering an already registered queue\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) kobject_name(&dev->kobj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * SCSI probing may synchronously create and destroy a lot of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * request_queues for non-existent devices. Shutting down a fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * functional queue takes measurable wallclock time as RCU grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * periods are involved. To avoid excessive latency in these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * cases, a request_queue starts out in a degraded mode which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * faster to shut down and is made fully functional here as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * request_queues for non-existent devices never get registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!blk_queue_init_done(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) percpu_ref_switch_to_percpu(&q->q_usage_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) blk_queue_update_readahead(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ret = blk_trace_init_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) mutex_lock(&q->sysfs_dir_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) blk_trace_remove_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ret = sysfs_create_group(&q->kobj, &queue_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) blk_trace_remove_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) kobject_del(&q->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) kobject_put(&dev->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) mutex_lock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) blk_debugfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) mutex_unlock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (queue_is_mq(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) __blk_mq_register_dev(dev, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) blk_mq_debugfs_register(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) mutex_lock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (q->elevator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ret = elv_register_queue(q, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) mutex_unlock(&q->sysfs_dir_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) kobject_del(&q->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) blk_trace_remove_sysfs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) kobject_put(&dev->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) wbt_enable_default(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) blk_throtl_register_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Now everything is ready, so send out the KOBJ_ADD uevent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) kobject_uevent(&q->kobj, KOBJ_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (q->elevator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) mutex_unlock(&q->sysfs_dir_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) EXPORT_SYMBOL_GPL(blk_register_queue);
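
/*
 * Illustrative sketch only: the common path by which blk_register_queue()
 * is reached. example_probe(), struct example_dev and its members are
 * hypothetical; most blk-mq drivers do not call blk_register_queue()
 * directly but get here through device_add_disk()/add_disk().
 *
 *	static int example_probe(struct example_dev *edev)
 *	{
 *		edev->disk->queue = blk_mq_init_queue(&edev->tag_set);
 *		if (IS_ERR(edev->disk->queue))
 *			return PTR_ERR(edev->disk->queue);
 *
 *		// device_add_disk() ends up calling blk_register_queue(),
 *		// which populates /sys/block/<disk>/queue/ and emits KOBJ_ADD.
 *		device_add_disk(edev->parent, edev->disk, NULL);
 *		return 0;
 *	}
 */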
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * blk_unregister_queue - counterpart of blk_register_queue()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * @disk: Disk whose request queue should be unregistered from sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Note: the caller is responsible for guaranteeing that this function is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * after blk_register_queue() has finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) void blk_unregister_queue(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct request_queue *q = disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (WARN_ON(!q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* Return early if disk->queue was never registered. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (!blk_queue_registered(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * Since sysfs_remove_dir() prevents adding new directory entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * before removal of existing entries starts, protect against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * concurrent elv_iosched_store() calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) mutex_lock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) mutex_lock(&q->sysfs_dir_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * Remove the sysfs attributes before unregistering the queue data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * structures that can be modified through sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (queue_is_mq(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) blk_mq_unregister_dev(disk_to_dev(disk), q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) blk_trace_remove_sysfs(disk_to_dev(disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) mutex_lock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (q->elevator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) elv_unregister_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) mutex_unlock(&q->sysfs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Now that we've deleted all child objects, we can delete the queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) kobject_uevent(&q->kobj, KOBJ_REMOVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) kobject_del(&q->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) mutex_unlock(&q->sysfs_dir_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) kobject_put(&disk_to_dev(disk)->kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
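
/*
 * Illustrative sketch only: the usual removal-side ordering that pairs with
 * the registration path above. example_remove() and struct example_dev are
 * hypothetical.
 *
 *	static void example_remove(struct example_dev *edev)
 *	{
 *		// del_gendisk() calls blk_unregister_queue(), tearing down the
 *		// queue/ sysfs directory and its children.
 *		del_gendisk(edev->disk);
 *
 *		// Per the blk_release_queue() kerneldoc above, the request_queue
 *		// refcount then drops to zero with the last put_disk(), at which
 *		// point blk_release_queue() frees the queue's resources.
 *		blk_cleanup_queue(edev->disk->queue);
 *		put_disk(edev->disk);
 *	}
 */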