// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len, u64 cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}

/*
 * Send out a notify message for this process, if we haven't done so since a
 * trace started.
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

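/* Emit a BLK_TN_TIMESTAMP note carrying the current wall-clock time. */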
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
			  const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
	trace_note(bt, current->pid, BLK_TN_MESSAGE, buf, n, 0);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

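/*
 * Return non-zero if this event should be filtered out: its action class is
 * not in the configured mask, the sector lies outside the traced LBA range,
 * or it was not issued by the traced pid.
 */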
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int op, int op_flags, u32 what, int error, int pdu_len,
			    void *pdu_data, u64 cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct trace_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->array_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

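/* Tear down the debugfs files, relay channel and per-cpu data of a trace. */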
static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

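/*
 * The block tracepoints are registered when the first trace is set up and
 * unregistered again when the last reference is dropped.
 */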
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

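/*
 * Wait for any in-flight RCU readers of q->blk_trace to finish before the
 * trace data is freed and the tracepoint reference is dropped.
 */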
static void blk_trace_cleanup(struct blk_trace *bt)
{
	synchronize_rcu();
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_replace_pointer(q->blk_trace, NULL,
				 lockdep_is_held(&q->debugfs_mutex));
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

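/* Report the number of events dropped because a relay subbuffer was full. */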
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = blk_dropped_read,
	.llseek = default_llseek,
};

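/* Let user space inject a free-form message into the trace via debugfs. */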
static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = blk_msg_write,
	.llseek = noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start = blk_subbuf_start_callback,
	.create_buf_file = blk_create_buf_file_callback,
	.remove_buf_file = blk_remove_buf_file_callback,
};

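/*
 * Restrict tracing to the extent of the partition when one is given,
 * otherwise cover the whole device.
 */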
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	lockdep_assert_held(&q->debugfs_mutex);

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * Some device names have longer paths - convert the slashes to
	 * underscores for this to work as expected.
	 */
	strreplace(buts->name, '/', '_');

	/*
	 * bdev can be NULL (as with scsi-generic); in that case this is as
	 * helpful as we can be.
	 */
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		pr_warn("Concurrent blktraces are not allowed on %s\n",
			buts->name);
		return -EBUSY;
	}

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	/*
	 * When tracing the whole disk, reuse the existing debugfs directory
	 * created by the block layer on init. For partitions and scsi-generic
	 * block devices we create a temporary new debugfs directory that will
	 * be removed once the trace ends.
	 */
	if (bdev && !bdev_is_partition(bdev))
		dir = q->debugfs_dir;
	else
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);

	/*
	 * As blktrace relies on debugfs for its interface, the debugfs
	 * directory is required, contrary to the usual mantra of not checking
	 * for debugfs files or directories.
	 */
	if (IS_ERR_OR_NULL(dir)) {
		pr_warn("debugfs_dir not present for %s so skipping\n",
			buts->name);
		ret = -ENOENT;
		goto err;
	}

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	rcu_assign_pointer(q->blk_trace, bt);
	get_probe_ref();

	ret = 0;
err:
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
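/*
 * 32-bit compat setup path: translate the compat setup structure before
 * handing off to do_blk_trace_setup().
 */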
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
				       lockdep_is_held(&q->debugfs_mutex));
	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->debugfs_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->debugfs_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev: the block device
 * @cmd: the ioctl cmd
 * @arg: the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->debugfs_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		fallthrough;
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->debugfs_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q: the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->debugfs_mutex);
	if (rcu_dereference_protected(q->blk_trace,
				      lockdep_is_held(&q->debugfs_mutex))) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->debugfs_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt;

	/* We don't use the 'bt' value here except as an optimization... */
	bt = rcu_dereference_protected(q->blk_trace, 1);
	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return 0;

	if (!bio->bi_blkg)
		return 0;
	return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif

static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return 0;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq: the source request
 * @error: return status to log
 * @nr_bytes: number of completed bytes
 * @what: the action
 * @cgid: the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what, u64 cgid)
{
	struct blk_trace *bt;

	rcu_read_lock();
	bt = rcu_dereference(rq->q->blk_trace);
	if (likely(!bt)) {
		rcu_read_unlock();
		return;
	}

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
	rcu_read_unlock();
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static void blk_add_trace_rq_insert(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct request_queue *q, struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) static void blk_add_trace_rq_issue(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct request_queue *q, struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) static void blk_add_trace_rq_merge(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct request_queue *q, struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void blk_add_trace_rq_requeue(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int error, unsigned int nr_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) blk_trace_request_get_cgid(rq->q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * blk_add_trace_bio - Add a trace for a bio oriented action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @q: queue the io is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @bio: the source bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * @what: the action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * @error: error, if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Records an action against a bio. Will log the bio offset + size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) u32 what, int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (likely(!bt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) bio_op(bio), bio->bi_opf, what, error, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) blk_trace_bio_get_cgid(q, bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static void blk_add_trace_bio_bounce(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct request_queue *q, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static void blk_add_trace_bio_complete(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct request_queue *q, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) blk_status_to_errno(bio->bi_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void blk_add_trace_bio_backmerge(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static void blk_add_trace_bio_frontmerge(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) static void blk_add_trace_bio_queue(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct request_queue *q, struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static void blk_add_trace_getrq(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct bio *bio, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (bt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static void blk_add_trace_sleeprq(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct bio *bio, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (bt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) 0, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void blk_add_trace_plug(void *ignore, struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (bt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) unsigned int depth, bool explicit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (bt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) __be64 rpdu = cpu_to_be64(depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) u32 what;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (explicit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) what = BLK_TA_UNPLUG_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) what = BLK_TA_UNPLUG_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static void blk_add_trace_split(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct request_queue *q, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned int pdu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (bt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) __be64 rpdu = cpu_to_be64(pdu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) __blk_add_trace(bt, bio->bi_iter.bi_sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) BLK_TA_SPLIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) blk_status_to_errno(bio->bi_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sizeof(rpdu), &rpdu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) blk_trace_bio_get_cgid(q, bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * @ignore: trace callback data parameter (not used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * @q: queue the io is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * @bio: the source bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @dev: target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * @from: source sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * Device mapper or raid targets sometimes need to split a bio because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * it spans a stripe (or similar). Add a trace for that action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void blk_add_trace_bio_remap(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct request_queue *q, struct bio *bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_t dev, sector_t from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct blk_io_trace_remap r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (likely(!bt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) r.device_from = cpu_to_be32(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) r.device_to = cpu_to_be32(bio_dev(bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) r.sector_from = cpu_to_be64(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) blk_status_to_errno(bio->bi_status),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * blk_add_trace_rq_remap - Add a trace for a request-remap operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * @ignore: trace callback data parameter (not used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * @q: queue the io is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * @rq: the source request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * @dev: target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * @from: source sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * Device mapper remaps requests to other devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * Add a trace for that action.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static void blk_add_trace_rq_remap(void *ignore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) struct request *rq, dev_t dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) sector_t from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct blk_io_trace_remap r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (likely(!bt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) r.device_from = cpu_to_be32(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) r.sector_from = cpu_to_be64(from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * blk_add_driver_data - Add binary message with driver-specific data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * @q: queue the io is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * @rq: io request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * @data: driver-specific data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * @len: length of driver-specific data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * Some drivers might want to write driver-specific data per request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) void blk_add_driver_data(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) void *data, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) bt = rcu_dereference(q->blk_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (likely(!bt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) BLK_TA_DRV_DATA, 0, len, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) blk_trace_request_get_cgid(q, rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) EXPORT_SYMBOL_GPL(blk_add_driver_data);
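
/*
 * Purely illustrative usage sketch (not from this file): a driver that
 * wants its per-request status to appear in blktrace could call the
 * helper above from its completion path. "struct foo_result" and
 * "foo_rq" are made-up names; only the blk_add_driver_data() call
 * itself reflects the API documented above:
 *
 *	struct foo_result res = { .status = 3 };
 *
 *	blk_add_driver_data(foo_rq->q, foo_rq, &res, sizeof(res));
 *
 * The payload is recorded as a BLK_TA_DRV_DATA event carrying
 * sizeof(res) bytes of PDU data (dumped in hex by blk_log_dump_pdu()).
 */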
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static void blk_register_tracepoints(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ret = register_trace_block_plug(blk_add_trace_plug, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = register_trace_block_split(blk_add_trace_split, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static void blk_unregister_tracepoints(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) unregister_trace_block_split(blk_add_trace_split, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) unregister_trace_block_plug(blk_add_trace_plug, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) tracepoint_synchronize_unregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * struct blk_io_tracer formatting routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int tc = t->action >> BLK_TC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) rwbs[i++] = 'N';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (tc & BLK_TC_FLUSH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) rwbs[i++] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (tc & BLK_TC_DISCARD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) rwbs[i++] = 'D';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) else if (tc & BLK_TC_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) rwbs[i++] = 'W';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) else if (t->bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) rwbs[i++] = 'R';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) rwbs[i++] = 'N';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (tc & BLK_TC_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) rwbs[i++] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (tc & BLK_TC_AHEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rwbs[i++] = 'A';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (tc & BLK_TC_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rwbs[i++] = 'S';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (tc & BLK_TC_META)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) rwbs[i++] = 'M';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) rwbs[i] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
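
/*
 * A few illustrative results of the mapping above (not exhaustive):
 * a synchronous write yields "WS", a readahead read yields "RA", and
 * a message/note event yields just "N".
 */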
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return (const struct blk_io_trace *)ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
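/*
 * The accessors below decode a trace entry laid out as:
 *
 *	struct blk_io_trace | __u64 cgid (only when the action carries
 *	__BLK_TA_CGROUP) | pdu payload
 *
 * The recorded pdu_len covers both the optional cgid and the payload,
 * hence the sizeof(u64) adjustments.
 */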
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static inline u64 t_cgid(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return *(u64 *)(te_blk_io_trace(ent) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) static inline u32 t_action(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) return te_blk_io_trace(ent)->action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static inline u32 t_bytes(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return te_blk_io_trace(ent)->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static inline u32 t_sec(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return te_blk_io_trace(ent)->bytes >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static inline unsigned long long t_sector(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return te_blk_io_trace(ent)->sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static inline __u16 t_error(const struct trace_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return te_blk_io_trace(ent)->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) const __be64 *val = pdu_start(ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return be64_to_cpu(*val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) bool has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) char rwbs[RWBS_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) unsigned long long ts = iter->ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) unsigned secs = (unsigned long)ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) fill_rwbs(rwbs, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) trace_seq_printf(&iter->seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) MAJOR(t->device), MINOR(t->device), iter->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) secs, nsec_rem, iter->ent->pid, act, rwbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
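
/*
 * Illustrative only: a line in the classic format above looks roughly
 * like (all values made up)
 *
 *	  8,0    1     0.000123456  4321  Q   R 1024 + 8 [bash]
 *
 * i.e. major,minor cpu secs.nsecs pid action rwbs, followed by the
 * per-action body emitted by the blk_log_*() helpers.
 */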
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static void blk_log_action(struct trace_iterator *iter, const char *act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) char rwbs[RWBS_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) fill_rwbs(rwbs, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (has_cg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) u64 id = t_cgid(iter->ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) char blkcg_name_buf[NAME_MAX + 1] = "<...>";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) cgroup_path_from_kernfs_id(id, blkcg_name_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) sizeof(blkcg_name_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) MAJOR(t->device), MINOR(t->device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) blkcg_name_buf, act, rwbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * The cgid portion used to be "INO,GEN". Userland
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * builds a FILEID_INO32_GEN fid out of them and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * opens the cgroup using open_by_handle_at(2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * While 32bit ino setups are still the same, 64bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * ones now use the 64bit ino as the whole ID and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * no longer use generation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Regardless of the content, always output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * be mapped back to @id on both 64 and 32bit ino
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * setups. See __kernfs_fh_to_dentry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
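/*
 * (Illustrative: a hypothetical id of 0x0000002300000010 is thus
 * printed as "10,23", both halves in hex.)
 */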
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) trace_seq_printf(&iter->seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) "%3d,%-3d %llx,%-llx %2s %3s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) MAJOR(t->device), MINOR(t->device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) id & U32_MAX, id >> 32, act, rwbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) MAJOR(t->device), MINOR(t->device), act, rwbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void blk_log_dump_pdu(struct trace_seq *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) const unsigned char *pdu_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int pdu_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int i, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) pdu_buf = pdu_start(ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) pdu_len = pdu_real_len(ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (!pdu_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* find where the trailing zeroes begin; only the first of them is printed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) for (end = pdu_len - 1; end >= 0; end--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (pdu_buf[end])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) end++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) trace_seq_putc(s, '(');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < pdu_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) trace_seq_printf(s, "%s%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) i == 0 ? "" : " ", pdu_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * stop when the rest is just zeroes and indicate so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * with a ".." appended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (i == end && end != pdu_len - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) trace_seq_puts(s, " ..) ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) trace_seq_puts(s, ") ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) char cmd[TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) trace_find_cmdline(ent->pid, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) trace_seq_printf(s, "%u ", t_bytes(ent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) blk_log_dump_pdu(s, ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) trace_seq_printf(s, "[%s]\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (t_sec(ent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) trace_seq_printf(s, "%llu + %u [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) t_sector(ent), t_sec(ent), cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) trace_seq_printf(s, "[%s]\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static void blk_log_with_error(struct trace_seq *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) blk_log_dump_pdu(s, ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) trace_seq_printf(s, "[%d]\n", t_error(ent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (t_sec(ent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) trace_seq_printf(s, "%llu + %u [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) t_sector(ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) t_sec(ent), t_error(ent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) trace_seq_printf(s, "%llu [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) t_sector(ent), t_error(ent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) t_sector(ent), t_sec(ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) MAJOR(be32_to_cpu(__r->device_from)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) MINOR(be32_to_cpu(__r->device_from)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) be64_to_cpu(__r->sector_from));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) char cmd[TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) trace_find_cmdline(ent->pid, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) trace_seq_printf(s, "[%s]\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) char cmd[TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) trace_find_cmdline(ent->pid, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) char cmd[TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) trace_find_cmdline(ent->pid, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) get_pdu_int(ent, has_cg), cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) bool has_cg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) trace_seq_putmem(s, pdu_start(ent, has_cg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) pdu_real_len(ent, has_cg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) trace_seq_putc(s, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * struct tracer operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static void blk_tracer_print_header(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) "# | | | | | |\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static void blk_tracer_start(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) blk_tracer_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static int blk_tracer_init(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) blk_tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) blk_tracer_start(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static void blk_tracer_stop(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) blk_tracer_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static void blk_tracer_reset(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) blk_tracer_stop(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
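/*
 * The two strings per action are the short and the verbose form; which
 * one is emitted depends on the "verbose" trace option (see the
 * long_act selection in print_one_line() below), e.g. BLK_TA_QUEUE is
 * shown as "Q" or "queue".
 */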
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) const char *act[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) void (*print)(struct trace_seq *s, const struct trace_entry *ent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) bool has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) } what2act[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static enum print_line_t print_one_line(struct trace_iterator *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) bool classic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) const struct blk_io_trace *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) u16 what;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) bool long_act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) blk_log_action_t *log_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) bool has_cg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) t = te_blk_io_trace(iter->ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) log_action = classic ? &blk_log_action_classic : &blk_log_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) has_cg = t->action & __BLK_TA_CGROUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) log_action(iter, long_act ? "message" : "m", has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) blk_log_msg(s, iter->ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return trace_handle_return(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) trace_seq_printf(s, "Unknown action %x\n", what);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) log_action(iter, what2act[what].act[long_act], has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) what2act[what].print(s, iter->ent, has_cg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return trace_handle_return(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int flags, struct trace_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return print_one_line(iter, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
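/*
 * Re-create the original struct blk_io_trace record layout from the
 * ftrace entry: a fresh header (magic/version, zero sequence, the
 * ftrace timestamp) followed by the remainder of the entry starting
 * at ->sector, including the pdu payload.
 */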
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) const int offset = offsetof(struct blk_io_trace, sector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct blk_io_trace old = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) .time = iter->ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) trace_seq_putmem(s, &old, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) trace_seq_putmem(s, &t->sector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) sizeof(old) - offset + t->pdu_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static enum print_line_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) struct trace_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) blk_trace_synthesize_old_trace(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return trace_handle_return(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return TRACE_TYPE_UNHANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return print_one_line(iter, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /* don't output context-info for blk_classic output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (bit == TRACE_BLK_OPT_CLASSIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) static struct tracer blk_tracer __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) .name = "blk",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) .init = blk_tracer_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) .reset = blk_tracer_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) .start = blk_tracer_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) .stop = blk_tracer_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) .print_header = blk_tracer_print_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) .print_line = blk_tracer_print_line,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) .flags = &blk_tracer_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) .set_flag = blk_tracer_set_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static struct trace_event_functions trace_blk_event_funcs = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) .trace = blk_trace_event_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) .binary = blk_trace_event_print_binary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static struct trace_event trace_blk_event = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) .type = TRACE_BLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) .funcs = &trace_blk_event_funcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
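/*
 * Register the TRACE_BLK event type and the "blk" tracer at boot.  If the
 * tracer itself cannot be registered, the event registration is undone.
 */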
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static int __init init_blk_tracer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!register_trace_event(&trace_blk_event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) pr_warn("Warning: could not register block events\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (register_tracer(&blk_tracer) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) pr_warn("Warning: could not register the block tracer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) unregister_trace_event(&trace_blk_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) device_initcall(init_blk_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
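/*
 * Tear down a trace attached through the sysfs interface: detach
 * q->blk_trace (the caller holds q->debugfs_mutex), take a still-running
 * trace off the running list, drop the probe reference and wait for RCU
 * readers to finish before freeing the blk_trace.
 */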
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static int blk_trace_remove_queue(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) bt = rcu_replace_pointer(q->blk_trace, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) lockdep_is_held(&q->debugfs_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (bt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (bt->trace_state == Blktrace_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) bt->trace_state = Blktrace_stopped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) spin_lock_irq(&running_trace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) list_del_init(&bt->running_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) spin_unlock_irq(&running_trace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) relay_flush(bt->rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) put_probe_ref();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) blk_trace_free(bt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * Set up everything required to start tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) */
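/*
 * Note that this path creates no relay channel and no debugfs files; only
 * the per-cpu message buffer is allocated, and the action mask defaults to
 * "all events".
 */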
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int blk_trace_setup_queue(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) struct blk_trace *bt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) bt = kzalloc(sizeof(*bt), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (!bt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!bt->msg_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) goto free_bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) bt->dev = bdev->bd_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) bt->act_mask = (u16)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) blk_trace_setup_lba(bt, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) rcu_assign_pointer(q->blk_trace, bt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) get_probe_ref();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) free_bt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) blk_trace_free(bt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * sysfs interface to enable and configure tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
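/*
 * Example usage from user space (illustrative; assumes a disk named sda
 * with the "trace" attribute group exposed under /sys/block/sda/):
 *
 *   echo read,write,complete > /sys/block/sda/trace/act_mask
 *   echo 1 > /sys/block/sda/trace/enable
 *   cat /sys/block/sda/trace/act_mask
 */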
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) char *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) const char *buf, size_t count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) #define BLK_TRACE_DEVICE_ATTR(_name) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) sysfs_blk_trace_attr_show, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) sysfs_blk_trace_attr_store)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static BLK_TRACE_DEVICE_ATTR(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static BLK_TRACE_DEVICE_ATTR(act_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static BLK_TRACE_DEVICE_ATTR(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static BLK_TRACE_DEVICE_ATTR(start_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static BLK_TRACE_DEVICE_ATTR(end_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) static struct attribute *blk_trace_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) &dev_attr_enable.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) &dev_attr_act_mask.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) &dev_attr_pid.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) &dev_attr_start_lba.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) &dev_attr_end_lba.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct attribute_group blk_trace_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) .name = "trace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) .attrs = blk_trace_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
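/* Mapping between BLK_TC_* category bits and the names used by act_mask */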
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) const char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) } mask_maps[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) { BLK_TC_READ, "read" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) { BLK_TC_WRITE, "write" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) { BLK_TC_FLUSH, "flush" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) { BLK_TC_SYNC, "sync" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) { BLK_TC_QUEUE, "queue" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) { BLK_TC_REQUEUE, "requeue" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) { BLK_TC_ISSUE, "issue" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) { BLK_TC_COMPLETE, "complete" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) { BLK_TC_FS, "fs" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) { BLK_TC_PC, "pc" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) { BLK_TC_NOTIFY, "notify" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) { BLK_TC_AHEAD, "ahead" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) { BLK_TC_META, "meta" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) { BLK_TC_DISCARD, "discard" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) { BLK_TC_DRV_DATA, "drv_data" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) { BLK_TC_FUA, "fua" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
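/*
 * Parse a comma-separated, case-insensitive list of category names (e.g.
 * "read,write,sync") into a BLK_TC_* mask.  Returns -EINVAL for an unknown
 * name and -ENOMEM if the temporary copy cannot be allocated.
 */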
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static int blk_trace_str2mask(const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) int mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) char *buf, *s, *token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) buf = kstrdup(str, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (buf == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) s = strstrip(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) token = strsep(&s, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (token == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (*token == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (strcasecmp(token, mask_maps[i].str) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) mask |= mask_maps[i].mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (i == ARRAY_SIZE(mask_maps)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) mask = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
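/*
 * Format the categories enabled in @mask as a comma-separated,
 * newline-terminated list and return the number of bytes written.
 */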
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static ssize_t blk_trace_mask2str(char *buf, int mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) char *p = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (mask & mask_maps[i].mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) p += sprintf(p, "%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) (p == buf) ? "" : ",", mask_maps[i].str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) *p++ = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return p - buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (bdev->bd_disk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return bdev_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
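/*
 * Show one trace attribute: resolve the partition's block_device and queue,
 * then read q->blk_trace under q->debugfs_mutex.  "enable" reports 0/1; the
 * remaining attributes print their value, or "disabled" when no trace is
 * attached.
 */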
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct block_device *bdev = bdget_part(dev_to_part(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) ssize_t ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (bdev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) q = blk_trace_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (q == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) goto out_bdput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) mutex_lock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) bt = rcu_dereference_protected(q->blk_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) lockdep_is_held(&q->debugfs_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (attr == &dev_attr_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) ret = sprintf(buf, "%u\n", !!bt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) goto out_unlock_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (bt == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) ret = sprintf(buf, "disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) else if (attr == &dev_attr_act_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) ret = blk_trace_mask2str(buf, bt->act_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) else if (attr == &dev_attr_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) ret = sprintf(buf, "%u\n", bt->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) else if (attr == &dev_attr_start_lba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) ret = sprintf(buf, "%llu\n", bt->start_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) else if (attr == &dev_attr_end_lba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) ret = sprintf(buf, "%llu\n", bt->end_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) out_unlock_bdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) mutex_unlock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) out_bdput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
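/*
 * Store one trace attribute.  act_mask accepts either a number or a list of
 * category names; "enable" sets up or removes the trace; writing any other
 * attribute first sets up a trace if none is attached yet.  Returns @count
 * on success.
 */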
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct block_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct blk_trace *bt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (attr == &dev_attr_act_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (kstrtoull(buf, 0, &value)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /* Assume it is a list of trace category names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) ret = blk_trace_str2mask(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) value = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) } else if (kstrtoull(buf, 0, &value))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) bdev = bdget_part(dev_to_part(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (bdev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) q = blk_trace_get_queue(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (q == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) goto out_bdput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) mutex_lock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) bt = rcu_dereference_protected(q->blk_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) lockdep_is_held(&q->debugfs_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (attr == &dev_attr_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (!!value == !!bt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) goto out_unlock_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ret = blk_trace_setup_queue(q, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ret = blk_trace_remove_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) goto out_unlock_bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (bt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = blk_trace_setup_queue(q, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) bt = rcu_dereference_protected(q->blk_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) lockdep_is_held(&q->debugfs_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (attr == &dev_attr_act_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) bt->act_mask = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) else if (attr == &dev_attr_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) bt->pid = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) else if (attr == &dev_attr_start_lba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) bt->start_lba = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) else if (attr == &dev_attr_end_lba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) bt->end_lba = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) out_unlock_bdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) mutex_unlock(&q->debugfs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) out_bdput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) bdput(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) int blk_trace_init_sysfs(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) void blk_trace_remove_sysfs(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) #endif /* CONFIG_BLK_DEV_IO_TRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) #ifdef CONFIG_EVENT_TRACING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
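/*
 * Translate a request's op and flags into the "rwbs" string used by the
 * block tracepoints: an optional leading 'F' for a preflush, one or two
 * characters for the operation (R/W/D/DE/F/N), then 'F'/'A'/'S'/'M' for
 * FUA, readahead, sync and metadata.  The caller's buffer must hold up to
 * 8 bytes including the terminating NUL.
 */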
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (op & REQ_PREFLUSH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) rwbs[i++] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) switch (op & REQ_OP_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) rwbs[i++] = 'W';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) case REQ_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) rwbs[i++] = 'D';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) case REQ_OP_SECURE_ERASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) rwbs[i++] = 'D';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) rwbs[i++] = 'E';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) case REQ_OP_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) rwbs[i++] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) case REQ_OP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) rwbs[i++] = 'R';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) rwbs[i++] = 'N';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (op & REQ_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) rwbs[i++] = 'F';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (op & REQ_RAHEAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) rwbs[i++] = 'A';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (op & REQ_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) rwbs[i++] = 'S';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (op & REQ_META)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) rwbs[i++] = 'M';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) rwbs[i] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) EXPORT_SYMBOL_GPL(blk_fill_rwbs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) #endif /* CONFIG_EVENT_TRACING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)