// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

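/*
 * Poll statistics are kept in per-I/O-size buckets: even entries of
 * q->poll_stat[] hold the read samples and odd entries the write samples
 * for the same request size.
 */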
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

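/*
 * Print the names of the flags that are set in @flags, separated by '|'.
 * Bits that have no entry in @flag_name are printed as their bit number.
 */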
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

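/*
 * Dump the size and allocation state of a tag map, including its regular and
 * reserved tag bitmaps.
 */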
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

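/*
 * Generate seq_file operations for walking a software queue's per-type
 * request list (default, read or poll) while holding ctx->lock.
 */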
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

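/*
 * Attributes that provide .seq_ops iterate over a list of requests; all other
 * attributes are backed by a single .show callback and are opened with
 * single_open().
 */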
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

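/*
 * Stash @data in the parent directory's inode so that the open, show and
 * write callbacks above can look it up through d_parent.
 */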
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static void debugfs_create_files(struct dentry *parent, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) const struct blk_mq_debugfs_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (IS_ERR_OR_NULL(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) d_inode(parent)->i_private = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) for (; attr->name; attr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) debugfs_create_file(attr->name, attr->mode, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) (void *)attr, &blk_mq_debugfs_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
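/*
 * Populate q->debugfs_dir with the queue-wide attributes, then create any
 * scheduler, per-hctx and rq_qos entries that could not be set up earlier
 * because the directory did not exist yet.
 */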
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) void blk_mq_debugfs_register(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct blk_mq_hw_ctx *hctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * didn't exist yet (because we don't know what to name the directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * until the queue is registered to a gendisk).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (q->elevator && !q->sched_debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) blk_mq_debugfs_register_sched(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) queue_for_each_hw_ctx(q, hctx, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!hctx->debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) blk_mq_debugfs_register_hctx(q, hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (q->elevator && !hctx->sched_debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) blk_mq_debugfs_register_sched_hctx(q, hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (q->rq_qos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct rq_qos *rqos = q->rq_qos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) while (rqos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) blk_mq_debugfs_register_rqos(rqos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rqos = rqos->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
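/*
 * The queue's debugfs directory itself is presumably removed elsewhere when
 * the queue goes away; here we only forget the scheduler directory pointer
 * so that a later re-registration starts from a clean state.
 */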
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) void blk_mq_debugfs_unregister(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) q->sched_debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
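/* Create the hctx<N>/cpu<C>/ directory and files for one software queue. */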
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct blk_mq_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct dentry *ctx_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) char name[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
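/*
 * Create hctx<N>/ under the queue's debugfs directory, populate it with the
 * hardware queue attributes, and add one cpu<C>/ subdirectory per software
 * context mapped to this hctx.
 */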
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) void blk_mq_debugfs_register_hctx(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct blk_mq_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) char name[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) hctx_for_each_ctx(hctx, ctx, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) blk_mq_debugfs_register_ctx(hctx, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
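/*
 * Remove hctx<N>/ and everything below it (including any cpu<C>/ and sched/
 * subdirectories) and clear the cached dentry pointers.
 */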
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) debugfs_remove_recursive(hctx->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) hctx->sched_debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) hctx->debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) void blk_mq_debugfs_register_hctxs(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct blk_mq_hw_ctx *hctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) queue_for_each_hw_ctx(q, hctx, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) blk_mq_debugfs_register_hctx(q, hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct blk_mq_hw_ctx *hctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) queue_for_each_hw_ctx(q, hctx, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) blk_mq_debugfs_unregister_hctx(hctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
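/*
 * Create sched/ under the queue directory and populate it with the files the
 * active elevator exports through elevator_type->queue_debugfs_attrs.  A
 * scheduler opts in with a table shaped like the ones above, for example
 * (hypothetical "foo" elevator, for illustration only):
 *
 *	static const struct blk_mq_debugfs_attr foo_queue_debugfs_attrs[] = {
 *		{"batched", 0400, foo_batched_show},
 *		{},
 *	};
 */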
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) void blk_mq_debugfs_register_sched(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct elevator_type *e = q->elevator->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * If the parent directory has not been created yet, return; we will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * called again later on, and the directory/files will be created then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (!q->debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (!e->queue_debugfs_attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) void blk_mq_debugfs_unregister_sched(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) debugfs_remove_recursive(q->sched_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) q->sched_debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
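/* Map an rq_qos policy id to the directory name used under rqos/. */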
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static const char *rq_qos_id_to_name(enum rq_qos_id id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) case RQ_QOS_WBT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return "wbt";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case RQ_QOS_LATENCY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return "latency";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case RQ_QOS_COST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return "cost";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) case RQ_QOS_IOPRIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return "ioprio";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) debugfs_remove_recursive(rqos->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) rqos->debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
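/*
 * Create rqos/<policy>/ for one rq_qos instance and populate it with the
 * files the policy exports through rq_qos_ops->debugfs_attrs; policies
 * without a debugfs_attrs table are skipped.  The shared rqos/ directory is
 * created lazily on first use.
 */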
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) struct request_queue *q = rqos->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) const char *dir_name = rq_qos_id_to_name(rqos->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (!q->rqos_debugfs_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) q->rqos_debugfs_dir = debugfs_create_dir("rqos",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) q->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) rqos->debugfs_dir = debugfs_create_dir(dir_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) rqos->q->rqos_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) debugfs_remove_recursive(q->rqos_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) q->rqos_debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
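/*
 * Per-hctx counterpart of blk_mq_debugfs_register_sched(): create
 * hctx<N>/sched/ and populate it from elevator_type->hctx_debugfs_attrs, if
 * the elevator provides any.
 */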
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct elevator_type *e = q->elevator->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!e->hctx_debugfs_attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) hctx->sched_debugfs_dir = debugfs_create_dir("sched",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) hctx->debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) debugfs_create_files(hctx->sched_debugfs_dir, hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) e->hctx_debugfs_attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) debugfs_remove_recursive(hctx->sched_debugfs_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) hctx->sched_debugfs_dir = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }