// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <trace/hooks/ftrace_dump.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set up via the kernel
 * command line) is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * has occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and is set back to zero only when the
 * initialization of the tracer succeeds; nothing else clears it.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module		*mod;
	unsigned long		length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL, as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
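
/*
 * Illustrative layout of one saved array (following the comment
 * above): for N maps the array holds N + 2 union items:
 *
 *	item[0].head     = { .mod = owning module (or NULL), .length = N }
 *	item[1..N].map   = the saved trace_eval_map entries
 *	item[N+1].tail   = { .next = the next saved array (or NULL) }
 */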
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned long flags, int pc);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);
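
/*
 * Illustrative example: the boot parameters handled above can be
 * combined on the kernel command line, e.g.
 *
 *	ftrace=function ftrace_dump_on_oops=orig_cpu traceoff_on_warning
 *	alloc_snapshot trace_clock=global tp_printk
 *
 * Valid tracer and clock names depend on the kernel configuration.
 */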

/*
 * ns2usecs - convert nanoseconds to microseconds, rounding to the
 * nearest microsecond (e.g. ns2usecs(1500) == 2, ns2usecs(1499) == 1).
 */
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

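/*
 * Hand @event to every registered export whose flags include @flag.
 * The list is walked with preemption disabled; entries are published
 * with rcu_assign_pointer() in add_trace_export() below.
 */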
static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are adding export to the list, but another CPU might be
	 * walking that list. We need to make sure the export->next
	 * pointer is valid before another CPU sees the export pointer
	 * added to the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
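
/*
 * Illustrative sketch (hypothetical my_* names): an external consumer
 * of trace data registers an export roughly like this:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		... push 'size' bytes at 'entry' to an external sink ...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */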

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can later be
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

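/*
 * Common open-time checks for tracefs files: fail under security
 * lockdown, when tracing is disabled, or when @tr (if non-NULL) is no
 * longer registered. On success a reference to @tr has been taken and
 * must be dropped with trace_array_put().
 */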
int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

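/*
 * If @call has a filter attached and @rec does not match it, discard
 * @event from @buffer and return 1. Otherwise return 0 and let the
 * commit proceed.
 */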
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * ignore all pids greater than the previous pid_max by default.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * When adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork, and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which happens on task exit.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
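
/*
 * Note: trace_filter_add_remove_task() is typically wired to the
 * sched_process_fork and sched_process_exit tracepoints: the fork
 * probe passes the parent as @self so that children of filtered tasks
 * are added, and the exit probe passes NULL so that the dying task's
 * pid is cleared.
 */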

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
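
/*
 * Illustrative wiring (hypothetical my_* names): the three helpers
 * above plug into a seq_file as
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,	(calls trace_pid_start())
 *		.next	= my_pid_next,	(calls trace_pid_next())
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 *
 * where the start/next callbacks resolve the pid_list under the
 * appropriate locking before delegating to the helpers.
 */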

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all-or-nothing
	 * operation: when the user adds new pids, a new array is
	 * created. If the operation fails, the current list is left
	 * unmodified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
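
/*
 * Example (illustrative): tracefs files such as set_event_pid and
 * set_ftrace_pid funnel writes through this function, so e.g.
 *
 *	# echo 123 456 > /sys/kernel/tracing/set_event_pid
 *
 * replaces the filter with pids 123 and 456 in one all-or-nothing
 * step, while an empty write clears it.
 */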

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

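/*
 * ftrace_now - return the current trace clock value of the top-level
 * trace buffer for @cpu.
 */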
u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled", which can be checked in fast paths
 * such as the irqsoff tracer. But it may be inaccurate due to races.
 * If you need to know the accurate state, use tracing_is_on(), which
 * is a little slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * trace_buf_size is the size in bytes that is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * for a buffer. Note, the number of bytes is always rounded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * to page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *
 * This number is purposely set to a low value (enough for 16384
 * entries). If a dump on oops happens, it is much appreciated
 * not to have to wait for all that output. It is configurable
 * at both boot time and run time anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
/* trace_types holds a linked list of available tracers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static struct tracer *trace_types __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * trace_types_lock is used to protect the trace_types list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) DEFINE_MUTEX(trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * serialize the access of the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * per-CPU ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume
 * access. Multiple read-only accesses are also serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static DECLARE_RWSEM(all_cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static inline void trace_access_lock(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (cpu == RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* gain it for accessing the whole ring buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) down_write(&all_cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* gain it for accessing a cpu ring buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) down_read(&all_cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Secondly block other access to this @cpu ring buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) mutex_lock(&per_cpu(cpu_access_lock, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) static inline void trace_access_unlock(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (cpu == RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) up_write(&all_cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) mutex_unlock(&per_cpu(cpu_access_lock, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) up_read(&all_cpu_access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
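
/*
 * Typical reader pattern (illustrative sketch, assuming a consuming
 * read of one CPU's buffer; buffer, ts and lost_events are the
 * caller's locals):
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	if (event)
 *		... decode ring_buffer_event_data(event) ...
 *	trace_access_unlock(cpu);
 */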
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static inline void trace_access_lock_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) mutex_init(&per_cpu(cpu_access_lock, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static DEFINE_MUTEX(access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static inline void trace_access_lock(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) (void)cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) mutex_lock(&access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static inline void trace_access_unlock(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) (void)cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) mutex_unlock(&access_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static inline void trace_access_lock_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static void __ftrace_trace_stack(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) int skip, int pc, struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static inline void ftrace_trace_stack(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int skip, int pc, struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int skip, int pc, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static inline void ftrace_trace_stack(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int skip, int pc, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static __always_inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) trace_event_setup(struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int type, unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct trace_entry *ent = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) tracing_generic_entry_update(ent, type, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static __always_inline struct ring_buffer_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) __trace_buffer_lock_reserve(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) event = ring_buffer_lock_reserve(buffer, len);
	if (event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) trace_event_setup(event, type, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) void tracer_tracing_on(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (tr->array_buffer.buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while a record is still made.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) tr->buffer_disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* Make the flag seen by readers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * tracing_on - enable tracing buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * This function enables tracing buffers that may have been
 * disabled with tracing_off().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) void tracing_on(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) tracer_tracing_on(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) EXPORT_SYMBOL_GPL(tracing_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static __always_inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) __this_cpu_write(trace_taskinfo_save, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* If this is the temp buffer, we need to commit fully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (this_cpu_read(trace_buffered_event) == event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* Length is in event->array[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ring_buffer_write(buffer, event->array[0], &event->array[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Release the temp buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) this_cpu_dec(trace_buffered_event_cnt);
	} else {
		ring_buffer_unlock_commit(buffer, event);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * __trace_puts - write a constant string into the trace buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * @ip: The address of the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * @str: The constant string to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * @size: The size of the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) int __trace_puts(unsigned long ip, const char *str, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct print_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) pc = preempt_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (unlikely(tracing_selftest_running || tracing_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) alloc = sizeof(*entry) + size + 2; /* possible \n added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) local_save_flags(irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) buffer = global_trace.array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ring_buffer_nest_start(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) irq_flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (!event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) memcpy(&entry->buf, str, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Add a newline if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (entry->buf[size - 1] != '\n') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) entry->buf[size] = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) entry->buf[size + 1] = '\0';
	} else {
		entry->buf[size] = '\0';
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ring_buffer_nest_end(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) EXPORT_SYMBOL_GPL(__trace_puts);
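
/*
 * Normally reached through the trace_puts() macro rather than called
 * directly, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 *
 * The macro expands to __trace_puts() when the string is not a
 * compile-time constant (constant strings go to __trace_bputs()).
 */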
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * __trace_bputs - write the pointer to a constant string into trace buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * @ip: The address of the caller
 * @str: The constant string to write to the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int __trace_bputs(unsigned long ip, const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct bputs_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) int size = sizeof(struct bputs_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) int pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pc = preempt_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (unlikely(tracing_selftest_running || tracing_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) local_save_flags(irq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) buffer = global_trace.array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ring_buffer_nest_start(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) irq_flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) entry->str = str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ring_buffer_nest_end(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) EXPORT_SYMBOL_GPL(__trace_bputs);
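
/*
 * Reached through trace_puts() with a compile-time constant string:
 * only the pointer to the string is recorded, making this the cheapest
 * of the print-style primitives. Illustrative sketch:
 *
 *	trace_puts("a constant string\n");	// lands here
 */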
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static void tracing_snapshot_instance_cond(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) void *cond_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct tracer *tracer = tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (in_nmi()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) internal_trace_puts("*** snapshot is being ignored ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!tr->allocated_snapshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) internal_trace_puts("*** stopping trace here! ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) tracing_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
	/* Note, the snapshot cannot be used while the tracer itself is using it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (tracer->use_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) update_max_tr(tr, current, smp_processor_id(), cond_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) void tracing_snapshot_instance(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) tracing_snapshot_instance_cond(tr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * tracing_snapshot - take a snapshot of the current buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * This causes a swap between the snapshot buffer and the current live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * tracing buffer. You can use this to take snapshots of the live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * trace when some condition is triggered, but continue to trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) *
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, this will stop tracing,
 * basically turning the live buffer into a permanent snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) void tracing_snapshot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tracing_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) EXPORT_SYMBOL_GPL(tracing_snapshot);
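
/*
 * Example (illustrative sketch, assuming the snapshot buffer was
 * allocated beforehand): preserve the trace whenever a new maximum
 * latency is seen, while tracing continues:
 *
 *	if (latency > max_latency) {	// both variables hypothetical
 *		max_latency = latency;
 *		tracing_snapshot();
 *	}
 */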
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * @tr: The tracing instance to snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * @cond_data: The data to be tested conditionally, and possibly saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional: it only happens if the cond_snapshot.update()
 * implementation receiving the cond_data returns true. In that case
 * the trace array's cond_snapshot update() operation used the
 * cond_data to determine whether the snapshot should be taken and, if
 * it was, presumably saved it along with the snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) tracing_snapshot_instance_cond(tr, cond_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * tracing_snapshot_cond_data - get the user data associated with a snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * @tr: The tracing instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * When the user enables a conditional snapshot using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * with the snapshot. This accessor is used to retrieve it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock, which the code calling
 * cond_snapshot.update() already holds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * Returns the cond_data associated with the trace array's snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) void *tracing_cond_snapshot_data(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) void *cond_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (tr->cond_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cond_data = tr->cond_snapshot->cond_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return cond_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct array_buffer *size_buf, int cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int tracing_alloc_snapshot_instance(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!tr->allocated_snapshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /* allocate spare buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ret = resize_buffer_duplicate_size(&tr->max_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) &tr->array_buffer, RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) tr->allocated_snapshot = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static void free_snapshot(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) set_buffer_entries(&tr->max_buffer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) tracing_reset_online_cpus(&tr->max_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) tr->allocated_snapshot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * tracing_alloc_snapshot - allocate snapshot buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * This only allocates the snapshot buffer if it isn't already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * allocated - it doesn't also take a snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * This is meant to be used in cases where the snapshot buffer needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * to be set up for events that can't sleep but need to be able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * trigger a snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int tracing_alloc_snapshot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ret = tracing_alloc_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) WARN_ON(ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
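
/*
 * Example (illustrative sketch): allocate the snapshot buffer from a
 * context that may sleep so that a later atomic path can safely call
 * tracing_snapshot():
 *
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *	...
 *	tracing_snapshot();	// now usable from atomic context
 */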
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * This is similar to tracing_snapshot(), but it will allocate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * snapshot buffer if it isn't already allocated. Use this only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) * where it is safe to sleep, as the allocation may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * This causes a swap between the snapshot buffer and the current live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * tracing buffer. You can use this to take snapshots of the live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * trace when some condition is triggered, but continue to trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) void tracing_snapshot_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ret = tracing_alloc_snapshot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) tracing_snapshot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * @tr: The tracing instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * @cond_data: User data to associate with the snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * @update: Implementation of the cond_snapshot update function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * Check whether the conditional snapshot for the given instance has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * already been enabled, or if the current tracer is already using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * snapshot; if so, return -EBUSY, else create a cond_snapshot and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * save the cond_data and update function inside.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * Returns 0 if successful, error otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) cond_update_fn_t update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct cond_snapshot *cond_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (!cond_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) cond_snapshot->cond_data = cond_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) cond_snapshot->update = update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ret = tracing_alloc_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (tr->current_trace->use_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /*
	 * The cond_snapshot can only change to NULL without holding
	 * the trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely while only holding the trace_types_lock and not
	 * having to take the max_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (tr->cond_snapshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) tr->cond_snapshot = cond_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) kfree(cond_snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
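
/*
 * Example (illustrative sketch; my_update() and my_data are
 * hypothetical):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_data *d = cond_data;
 *
 *		return d->bad_writes > 0;	// snapshot only then
 *	}
 *	...
 *	ret = tracing_snapshot_cond_enable(tr, &my_data, my_update);
 */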
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * @tr: The tracing instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * Check whether the conditional snapshot for the given instance is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * enabled; if so, free the cond_snapshot associated with it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * otherwise return -EINVAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * Returns 0 if successful, error otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) int tracing_snapshot_cond_disable(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
	if (!tr->cond_snapshot) {
		ret = -EINVAL;
	} else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) void tracing_snapshot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) EXPORT_SYMBOL_GPL(tracing_snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) int tracing_alloc_snapshot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) void tracing_snapshot_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
	/* Trigger the WARN_ONCE() in the tracing_snapshot() stub */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) tracing_snapshot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) void *tracing_cond_snapshot_data(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int tracing_snapshot_cond_disable(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) #endif /* CONFIG_TRACER_SNAPSHOT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) void tracer_tracing_off(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (tr->array_buffer.buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while a record is still made.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) tr->buffer_disabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* Make the flag seen by readers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * tracing_off - turn off tracing buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * This function stops the tracing buffers from recording data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * It does not disable any overhead the tracers themselves may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * be causing. This function simply causes all recording to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * the ring buffers to fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) void tracing_off(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) tracer_tracing_off(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) EXPORT_SYMBOL_GPL(tracing_off);
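
/*
 * Example (illustrative sketch): freeze the buffers when a bug
 * condition is detected so the events leading up to it are kept
 * (data_looks_corrupted() is hypothetical):
 *
 *	if (data_looks_corrupted(obj))
 *		tracing_off();
 */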
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) void disable_trace_on_warning(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (__disable_trace_on_warning) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) "Disabling tracing due to warning\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) tracing_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
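
/*
 * __disable_trace_on_warning is set via the "traceoff_on_warning"
 * boot parameter or at run time through the sysctl, e.g.:
 *
 *	sysctl kernel.traceoff_on_warning=1
 */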
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /**
 * tracer_tracing_is_on - show the real enabled state of the ring buffer
 * @tr: the trace array whose ring buffer state is queried
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) bool tracer_tracing_is_on(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (tr->array_buffer.buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return ring_buffer_record_is_on(tr->array_buffer.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return !tr->buffer_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * tracing_is_on - show state of ring buffers enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) int tracing_is_on(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return tracer_tracing_is_on(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) EXPORT_SYMBOL_GPL(tracing_is_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int __init set_buf_size(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) unsigned long buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) buf_size = memparse(str, &str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /*
	 * nr_entries cannot be zero and the startup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * tests require some buffer space. Therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * ensure we have at least 4096 bytes of buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) trace_buf_size = max(4096UL, buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) __setup("trace_buf_size=", set_buf_size);
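
/*
 * Example (illustrative): memparse() accepts size suffixes, so the
 * buffer can be sized on the kernel command line with e.g.:
 *
 *	trace_buf_size=4M
 */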
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static int __init set_tracing_thresh(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) unsigned long threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) ret = kstrtoul(str, 0, &threshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) tracing_thresh = threshold * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) __setup("tracing_thresh=", set_tracing_thresh);
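
/*
 * Example (illustrative): the value is given in microseconds and
 * stored in nanoseconds, so booting with:
 *
 *	tracing_thresh=100
 *
 * sets a 100 microsecond threshold for the latency tracers.
 */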
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) unsigned long nsecs_to_usecs(unsigned long nsecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return nsecs / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * of strings in the order that the evals (enum) were defined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) #undef C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) #define C(a, b) b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
/* These must match the bit positions in trace_iterator_flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static const char *trace_options[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) TRACE_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) u64 (*func)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) int in_ns; /* is this clock in nanoseconds? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) } trace_clocks[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) { trace_clock_local, "local", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) { trace_clock_global, "global", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) { trace_clock_counter, "counter", 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) { trace_clock_jiffies, "uptime", 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) { trace_clock, "perf", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) { ktime_get_mono_fast_ns, "mono", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) { ktime_get_raw_fast_ns, "mono_raw", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) { ktime_get_boot_fast_ns, "boot", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ARCH_TRACE_CLOCKS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) };
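
/*
 * The clock is selectable at run time through the "trace_clock"
 * tracefs file, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */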
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) bool trace_clock_in_ns(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (trace_clocks[tr->clock_id].in_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /*
 * trace_parser_get_init - allocates the buffer for the trace parser
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) int trace_parser_get_init(struct trace_parser *parser, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) memset(parser, 0, sizeof(*parser));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) parser->buffer = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (!parser->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) parser->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * trace_parser_put - frees the buffer for trace parser
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) void trace_parser_put(struct trace_parser *parser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) kfree(parser->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) parser->buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
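
/*
 * Typical parser life cycle (illustrative sketch):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... act on parser.buffer ...
 *	trace_parser_put(&parser);
 */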
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * trace_get_user - reads the user input string separated by space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * (matched by isspace(ch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * For each string found the 'struct trace_parser' is updated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * and the function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * Returns number of bytes read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * See kernel/trace/trace.h for 'struct trace_parser' details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) char ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) size_t read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!*ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) trace_parser_clear(parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) ret = get_user(ch, ubuf++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) read++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
	/*
	 * If the parser did not finish with the last write,
	 * continue reading the user input without skipping spaces.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (!parser->cont) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* skip white space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) while (cnt && isspace(ch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ret = get_user(ch, ubuf++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) read++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) parser->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* only spaces were written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (isspace(ch) || !ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) *ppos += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ret = read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* read the non-space input */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) while (cnt && !isspace(ch) && ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (parser->idx < parser->size - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) parser->buffer[parser->idx++] = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) ret = get_user(ch, ubuf++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) read++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
	/* We either got a complete token or must wait for another call to finish it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (isspace(ch) || !ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) parser->buffer[parser->idx] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) parser->cont = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) } else if (parser->idx < parser->size - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) parser->cont = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) parser->buffer[parser->idx++] = ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /* Make sure the parsed string always terminates with '\0'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) parser->buffer[parser->idx] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) *ppos += read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ret = read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
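
/*
 * For illustration, a tracefs write() handler would typically drive the
 * parser as sketched below (error handling trimmed; my_handle_token() is
 * a hypothetical consumer of the parsed word):
 *
 *	struct trace_parser parser;
 *	ssize_t ret;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		my_handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return ret;
 */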
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /* TODO add a seq_buf_to_buffer() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (trace_seq_used(s) <= s->seq.readpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) len = trace_seq_used(s) - s->seq.readpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (cnt > len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) cnt = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) memcpy(buf, s->buffer + s->seq.readpos, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) s->seq.readpos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) unsigned long __read_mostly tracing_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static const struct file_operations tracing_max_lat_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) defined(CONFIG_FSNOTIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static struct workqueue_struct *fsnotify_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static void latency_fsnotify_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) struct trace_array *tr = container_of(work, struct trace_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) fsnotify_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) struct trace_array *tr = container_of(iwork, struct trace_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) fsnotify_irqwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) queue_work(fsnotify_wq, &tr->fsnotify_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) static void trace_create_maxlat_file(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) d_tracer, &tr->max_latency,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) &tracing_max_lat_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) __init static int latency_fsnotify_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) WQ_UNBOUND | WQ_HIGHPRI, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (!fsnotify_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) pr_err("Unable to allocate tr_max_lat_wq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) late_initcall_sync(latency_fsnotify_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) void latency_fsnotify(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (!fsnotify_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * We cannot call queue_work(&tr->fsnotify_work) from here because it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * possible that we are called from __schedule() or do_idle(), which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * could cause a deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) irq_work_queue(&tr->fsnotify_irqwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
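
/*
 * The notification thus takes a two-stage detour to reach a context where
 * fsnotify_inode() is safe to call:
 *
 *   latency_fsnotify()                  any context, even inside __schedule()
 *     -> irq_work_queue()               defers to hard irq context
 *       -> latency_fsnotify_workfn_irq()
 *         -> queue_work()               defers to process context
 *           -> latency_fsnotify_workfn() -> fsnotify_inode()
 */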
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
/*
 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) &&
 * defined(CONFIG_FSNOTIFY)
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) #define trace_create_maxlat_file(tr, d_tracer) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) trace_create_file("tracing_max_latency", 0644, d_tracer, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) &tr->max_latency, &tracing_max_lat_fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * Copy the new maximum trace into the separate maximum-trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * structure. (this way the maximum trace is permanently saved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct array_buffer *trace_buf = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct array_buffer *max_buf = &tr->max_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) max_buf->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) max_buf->time_start = data->preempt_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) max_data->saved_latency = tr->max_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) max_data->critical_start = data->critical_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) max_data->critical_end = data->critical_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) max_data->pid = tsk->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * If tsk == current, then use current_uid(), as that does not use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * RCU. The irq tracer can be called out of RCU scope.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (tsk == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) max_data->uid = current_uid();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) max_data->uid = task_uid(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) max_data->policy = tsk->policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) max_data->rt_priority = tsk->rt_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
	/* record this task's comm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) tracing_record_cmdline(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) latency_fsnotify(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /**
 * update_max_tr - snapshot all trace buffers from @tr into its max buffer
 * @tr: trace array to snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * @tsk: the task with the latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * @cpu: The cpu that initiated the trace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * @cond_data: User data associated with a conditional snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * Flip the buffers between the @tr and the max_tr and record information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) * about which task was the cause of this latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) void *cond_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (tr->stop_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) WARN_ON_ONCE(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (!tr->allocated_snapshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* Only the nop tracer should hit this when disabling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) WARN_ON_ONCE(tr->current_trace != &nop_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* Inherit the recordable setting from array_buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) ring_buffer_record_on(tr->max_buffer.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) ring_buffer_record_off(tr->max_buffer.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) __update_max_tr(tr, tsk, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
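
/*
 * Illustrative only -- a latency tracer that detects a new maximum would
 * typically call this with interrupts disabled, along the lines of:
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id(), NULL);
 *	}
 */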
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: trace array to copy from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * @tsk: task with the latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * @cpu: the cpu of the buffer to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * Flip the trace of a single CPU buffer between the @tr and the max_tr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (tr->stop_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) WARN_ON_ONCE(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (!tr->allocated_snapshot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /* Only the nop tracer should hit this when disabling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) WARN_ON_ONCE(tr->current_trace != &nop_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (ret == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * We failed to swap the buffer due to a commit taking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * place on this CPU. We fail to record, but we reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * the max trace buffer (no one writes directly to it)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * and flag that it failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) "Failed to swap buffers due to commit in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) __update_max_tr(tr, tsk, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) #endif /* CONFIG_TRACER_MAX_TRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
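/*
 * Note: @full is assumed to be the percentage of the per-cpu buffer that
 * must be filled before ring_buffer_wait() wakes up; 0 means wake as soon
 * as any data is available.
 */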
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static int wait_on_pipe(struct trace_iterator *iter, int full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
	/* Iterators are static; they should be either filled or empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (trace_buffer_iter(iter, iter->cpu_file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) #ifdef CONFIG_FTRACE_STARTUP_TEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static bool selftests_can_run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct trace_selftests {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct tracer *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static LIST_HEAD(postponed_selftests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static int save_selftest(struct tracer *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct trace_selftests *selftest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (!selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) selftest->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) list_add(&selftest->list, &postponed_selftests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int run_tracer_selftest(struct tracer *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct tracer *saved_tracer = tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (!type->selftest || tracing_selftest_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * If a tracer registers early in boot up (before scheduling is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * initialized and such), then do not run its selftests yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * Instead, run it a little later in the boot process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!selftests_can_run)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return save_selftest(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * Run a selftest on this tracer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * Here we reset the trace buffer, and set the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * tracer to be this tracer. The tracer can then run some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * internal tracing to verify that everything is in order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * If we fail, we do not register this tracer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) tracing_reset_online_cpus(&tr->array_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) tr->current_trace = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (type->use_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* If we expanded the buffers, make sure the max is expanded too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (ring_buffer_expanded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) tr->allocated_snapshot = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* the test is responsible for initializing and enabling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) pr_info("Testing tracer %s: ", type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) ret = type->selftest(type, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /* the test is responsible for resetting too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) tr->current_trace = saved_tracer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) printk(KERN_CONT "FAILED!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) /* Add the warning after printing 'FAILED' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* Only reset on passing, to avoid touching corrupted buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) tracing_reset_online_cpus(&tr->array_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (type->use_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) tr->allocated_snapshot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) /* Shrink the max buffer again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (ring_buffer_expanded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) ring_buffer_resize(tr->max_buffer.buffer, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) printk(KERN_CONT "PASSED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
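
/*
 * For illustration, a selftest callback invoked above usually has this
 * shape (my_generate_events() and check_buffer_contents() are
 * hypothetical; the real selftests live in trace_selftest.c):
 *
 *	int trace_selftest_startup_my_tracer(struct tracer *trace,
 *					     struct trace_array *tr)
 *	{
 *		int ret = trace->init(tr);
 *		if (ret)
 *			return ret;
 *		my_generate_events();
 *		tracing_stop();
 *		ret = check_buffer_contents(tr);
 *		trace->reset(tr);
 *		tracing_start();
 *		return ret;
 *	}
 */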
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) static __init int init_trace_selftests(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct trace_selftests *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct tracer *t, **last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) selftests_can_run = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (list_empty(&postponed_selftests))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) pr_info("Running postponed tracer tests:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) tracing_selftest_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/*
		 * This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) ret = run_tracer_selftest(p->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* If the test fails, then warn and remove from available_tracers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) WARN(1, "tracer: %s failed selftest, disabling\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) p->type->name);
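			/*
			 * Unlink the failed tracer from the singly linked
			 * trace_types list. Walking with an indirect
			 * pointer (last) handles removal from the head and
			 * the middle of the list uniformly.
			 */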
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) last = &trace_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) for (t = trace_types; t; t = t->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (t == p->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) *last = t->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) last = &t->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) list_del(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) tracing_selftest_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) core_initcall(init_trace_selftests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static inline int run_tracer_selftest(struct tracer *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) #endif /* CONFIG_FTRACE_STARTUP_TEST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) static void add_tracer_options(struct trace_array *tr, struct tracer *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static void __init apply_trace_boot_options(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * register_tracer - register a tracer with the ftrace system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * @type: the plugin for the tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * Register a new plugin tracer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) int __init register_tracer(struct tracer *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct tracer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (!type->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) pr_info("Tracer must have a name\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (strlen(type->name) >= MAX_TRACER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (security_locked_down(LOCKDOWN_TRACEFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) pr_warn("Can not register tracer %s due to lockdown\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) tracing_selftest_running = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) for (t = trace_types; t; t = t->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (strcmp(type->name, t->name) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /* already found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) pr_info("Tracer %s already registered\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if (!type->set_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) type->set_flag = &dummy_set_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (!type->flags) {
		/* allocate a dummy tracer_flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!type->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) type->flags->val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) type->flags->opts = dummy_tracer_opt;
	} else if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* store the tracer for __set_tracer_option */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) type->flags->trace = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) ret = run_tracer_selftest(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) type->next = trace_types;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) trace_types = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) add_tracer_options(&global_trace, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) tracing_selftest_running = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (ret || !default_bootup_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) printk(KERN_INFO "Starting tracer '%s'\n", type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* Do we want this tracer to start on bootup? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) tracing_set_tracer(&global_trace, type->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) default_bootup_tracer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) apply_trace_boot_options();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
	/* Disable other selftests, since running this tracer would interfere with them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) disable_tracing_selftest("running a tracer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
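
/*
 * For illustration only -- a minimal registration sketch. A real tracer
 * fills in many more callbacks; my_tracer_init/my_tracer_reset are
 * hypothetical:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 */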
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct trace_buffer *buffer = buf->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ring_buffer_record_disable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
	/*
	 * Make sure all commits have finished: writers commit with
	 * preemption disabled, so synchronize_rcu() also waits for any
	 * in-flight commit to complete.
	 */
	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) ring_buffer_reset_cpu(buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) ring_buffer_record_enable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) void tracing_reset_online_cpus(struct array_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct trace_buffer *buffer = buf->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) ring_buffer_record_disable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* Make sure all commits have finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) buf->time_start = buffer_ftrace_now(buf, buf->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ring_buffer_reset_online_cpus(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) ring_buffer_record_enable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /* Must have trace_types_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) void tracing_reset_all_online_cpus(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!tr->clear_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) tr->clear_trace = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) tracing_reset_online_cpus(&tr->array_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) tracing_reset_online_cpus(&tr->max_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * is the tgid last observed corresponding to pid=i.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static int *tgid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* The maximum valid index into tgid_map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static size_t tgid_map_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) #define SAVED_CMDLINES_DEFAULT 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) #define NO_CMDLINE_MAP UINT_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) unsigned *map_cmdline_to_pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) unsigned cmdline_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) int cmdline_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) char *saved_cmdlines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) static struct saved_cmdlines_buffer *savedcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) static inline char *get_saved_cmdlines(int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static inline void set_cmdline(int idx, const char *cmdline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static int allocate_cmdlines_buffer(unsigned int val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct saved_cmdlines_buffer *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) s->map_cmdline_to_pid = kmalloc_array(val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) sizeof(*s->map_cmdline_to_pid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (!s->map_cmdline_to_pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (!s->saved_cmdlines) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) kfree(s->map_cmdline_to_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) s->cmdline_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) s->cmdline_num = val;
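	/*
	 * memset() writes byte-wise; this relies on NO_CMDLINE_MAP being
	 * UINT_MAX (every byte 0xff), so each map slot ends up equal to
	 * NO_CMDLINE_MAP.
	 */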
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) sizeof(s->map_pid_to_cmdline));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) val * sizeof(*s->map_cmdline_to_pid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
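
/*
 * Rough footprint with the default of 128 saved cmdlines, assuming
 * TASK_COMM_LEN == 16 and 4-byte map entries: 128 * 16 = 2048 bytes of
 * comm text plus 128 * 4 = 512 bytes for map_cmdline_to_pid, dwarfed by
 * the fixed (PID_MAX_DEFAULT + 1) * 4 ~= 128 KiB map_pid_to_cmdline
 * array embedded in the struct itself.
 */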
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) static int trace_create_savedcmd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (!savedcmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) kfree(savedcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) savedcmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) int is_tracing_stopped(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return global_trace.stop_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * tracing_start - quick start of the tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * If tracing is enabled but was stopped by tracing_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * this will start the tracer back up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) void tracing_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (tracing_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) raw_spin_lock_irqsave(&global_trace.start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (--global_trace.stop_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (global_trace.stop_count < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) /* Someone screwed up their debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) global_trace.stop_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) /* Prevent the buffers from switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) arch_spin_lock(&global_trace.max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) buffer = global_trace.array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) ring_buffer_record_enable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) buffer = global_trace.max_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) ring_buffer_record_enable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) arch_spin_unlock(&global_trace.max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static void tracing_start_tr(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (tracing_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) /* If global, we need to also start the max tracer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return tracing_start();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) raw_spin_lock_irqsave(&tr->start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (--tr->stop_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (tr->stop_count < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* Someone screwed up their debugging */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) tr->stop_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) ring_buffer_record_enable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) raw_spin_unlock_irqrestore(&tr->start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * tracing_stop - quick stop of the tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * Light weight way to stop tracing. Use in conjunction with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * tracing_start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) void tracing_stop(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) raw_spin_lock_irqsave(&global_trace.start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (global_trace.stop_count++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) /* Prevent the buffers from switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) arch_spin_lock(&global_trace.max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) buffer = global_trace.array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) ring_buffer_record_disable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) buffer = global_trace.max_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) ring_buffer_record_disable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) arch_spin_unlock(&global_trace.max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
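
/*
 * Because stop_count nests, a debugging helper can bracket a region with
 * these calls even if tracing was already stopped elsewhere. Sketch only
 * (inspect_buffers() is hypothetical):
 *
 *	tracing_stop();
 *	inspect_buffers();
 *	tracing_start();
 */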
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static void tracing_stop_tr(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) /* If global, we need to also stop the max tracer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) return tracing_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) raw_spin_lock_irqsave(&tr->start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (tr->stop_count++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) ring_buffer_record_disable(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) raw_spin_unlock_irqrestore(&tr->start_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) static int trace_save_cmdline(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) unsigned tpid, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) /* treat recording of idle task as a success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (!tsk->pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * It's not the end of the world if we don't get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) * the lock, but we also don't want to spin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * nor do we want to disable interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * so if we miss here, then better luck next time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (!arch_spin_trylock(&trace_cmdline_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) idx = savedcmd->map_pid_to_cmdline[tpid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (idx == NO_CMDLINE_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) savedcmd->map_pid_to_cmdline[tpid] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) savedcmd->cmdline_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) set_cmdline(idx, tsk->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) arch_spin_unlock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
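/*
 * Illustrative note (editorial addition, not from the original source):
 * because the map is indexed by "pid & (PID_MAX_DEFAULT - 1)", two pids
 * that differ by a multiple of PID_MAX_DEFAULT share a slot. For example,
 * with PID_MAX_DEFAULT = 32768, pid 40000 hashes to slot
 * 40000 & 32767 = 7232, the same slot as pid 7232. The
 * map_cmdline_to_pid[] check in __trace_find_cmdline() below is what
 * detects such collisions on lookup.
 */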
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) static void __trace_find_cmdline(int pid, char comm[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) unsigned map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) int tpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (!pid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) strcpy(comm, "<idle>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (WARN_ON_ONCE(pid < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) strcpy(comm, "<XXX>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) tpid = pid & (PID_MAX_DEFAULT - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) map = savedcmd->map_pid_to_cmdline[tpid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (map != NO_CMDLINE_MAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) tpid = savedcmd->map_cmdline_to_pid[map];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (tpid == pid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) strcpy(comm, "<...>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
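/*
 * Illustrative note: this is where the "<...>" seen in trace output comes
 * from: either the pid was never recorded, or its slot in the fixed-size
 * saved-cmdlines cache has since been reused by another task. Enlarging
 * the cache via the tracefs saved_cmdlines_size file makes such evictions
 * rarer.
 */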
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) void trace_find_cmdline(int pid, char comm[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) arch_spin_lock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) __trace_find_cmdline(pid, comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) arch_spin_unlock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) static int *trace_find_tgid_ptr(int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * Pairs with the smp_store_release in set_tracer_flag() to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) * if we observe a non-NULL tgid_map then we also observe the correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * tgid_map_max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) int *map = smp_load_acquire(&tgid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (unlikely(!map || pid > tgid_map_max))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return &map[pid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int trace_find_tgid(int pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) int *ptr = trace_find_tgid_ptr(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return ptr ? *ptr : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
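/*
 * Illustrative note: trace_find_tgid() returns 0 both when the pid is out
 * of range and when tgid recording was never enabled (tgid_map is only
 * allocated once the "record-tgid" trace option is set), so callers must
 * treat 0 as "unknown" rather than as a real tgid.
 */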
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static int trace_save_tgid(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) int *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) /* treat recording of idle task as a success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (!tsk->pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) ptr = trace_find_tgid_ptr(tsk->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) *ptr = tsk->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) static bool tracing_record_taskinfo_skip(int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (!__this_cpu_read(trace_taskinfo_save))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * tracing_record_taskinfo - record the task info of a task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) * @task: task to record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * @flags: TRACE_RECORD_CMDLINE for recording comm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * TRACE_RECORD_TGID for recording tgid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) void tracing_record_taskinfo(struct task_struct *task, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) bool done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (tracing_record_taskinfo_skip(flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * Record as much task information as possible. If some fail, continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * to try to record the others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* If recording any information failed, try again soon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (!done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) __this_cpu_write(trace_taskinfo_save, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * tracing_record_taskinfo_sched_switch - record task info for sched_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * @prev: previous task during sched_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * @next: next task during sched_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * @flags: TRACE_RECORD_CMDLINE for recording comm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * TRACE_RECORD_TGID for recording tgid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct task_struct *next, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) bool done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (tracing_record_taskinfo_skip(flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * Record as much task information as possible. If some fail, continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * to try to record the others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /* If recording any information failed, try again soon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (!done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) __this_cpu_write(trace_taskinfo_save, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /* Helpers to record a specific task information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) void tracing_record_cmdline(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) void tracing_record_tgid(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) tracing_record_taskinfo(task, TRACE_RECORD_TGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * simplifies those functions and keeps them in sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) enum print_line_t trace_handle_return(struct trace_seq *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return trace_seq_has_overflowed(s) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) EXPORT_SYMBOL_GPL(trace_handle_return);
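/*
 * Usage sketch (illustrative; foo_trace_print is a hypothetical handler
 * name): trace_handle_return() is meant to be the tail call of an event's
 * print callback, e.g.:
 *
 *	static enum print_line_t foo_trace_print(struct trace_iterator *iter,
 *						 int flags,
 *						 struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 0);
 *		return trace_handle_return(&iter->seq);
 *	}
 */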
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) struct task_struct *tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) entry->preempt_count = pc & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) entry->pid = (tsk) ? tsk->pid : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) entry->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) entry->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) TRACE_FLAG_IRQS_NOSUPPORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
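/*
 * Illustrative note: @pc is the raw preempt_count() sampled at the call
 * site, e.g.:
 *
 *	int pc = preempt_count();
 *	tracing_generic_entry_update(&entry->ent, type, flags, pc);
 *
 * The low byte carries the preemption depth (hence "pc & 0xff"), while
 * the NMI/HARDIRQ/SOFTIRQ mask tests above recover which context the
 * event was generated in.
 */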
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) struct ring_buffer_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) trace_buffer_lock_reserve(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) DEFINE_PER_CPU(int, trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static int trace_buffered_event_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * trace_buffered_event_enable - enable buffering events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) * When events are being filtered, it is quicker to write the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) * data into a temporary buffer when there is a good chance that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * event will not be committed. Discarding an event from the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * buffer is not as fast as committing one, and is much slower than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * copying the data and then committing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * When an event is to be filtered, allocate per-CPU buffers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * write the event data into. If the event is filtered and discarded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * it is simply dropped; otherwise, the entire data is committed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * in one shot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) void trace_buffered_event_enable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) if (trace_buffered_event_ref++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) page = alloc_pages_node(cpu_to_node(cpu),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) GFP_KERNEL | __GFP_NORETRY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) event = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) memset(event, 0, sizeof(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) per_cpu(trace_buffered_event, cpu) = event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (cpu == smp_processor_id() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) __this_cpu_read(trace_buffered_event) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) per_cpu(trace_buffered_event, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) trace_buffered_event_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
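/*
 * Usage sketch (illustrative): callers pair enable/disable under
 * event_mutex, typically around installing an event filter:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	...
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable() call, also under
 * event_mutex, when the filter is removed.
 */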
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static void enable_trace_buffered_event(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) /* Probably not needed, but do it anyway; pairs with the smp_wmb() in trace_buffered_event_disable() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) this_cpu_dec(trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) static void disable_trace_buffered_event(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) this_cpu_inc(trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * trace_buffered_event_disable - disable buffering events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * When a filter is removed, it is faster not to use the buffered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * events, and to commit directly into the ring buffer. Free up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * the temp buffers when there are no more users. This requires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) * special synchronization with current events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) void trace_buffered_event_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (WARN_ON_ONCE(!trace_buffered_event_ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (--trace_buffered_event_ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) /* For each CPU, set the buffer as used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) smp_call_function_many(tracing_buffer_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) disable_trace_buffered_event, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) /* Wait for all current users to finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) per_cpu(trace_buffered_event, cpu) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) * Make sure trace_buffered_event is NULL before clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) * trace_buffered_event_cnt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /* Do the work on each cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) smp_call_function_many(tracing_buffer_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) enable_trace_buffered_event, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
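/*
 * Illustrative note: the first IPI above works because the fast path in
 * trace_event_buffer_lock_reserve() only uses the per-CPU event when
 * this_cpu_inc_return(trace_buffered_event_cnt) == 1; bumping the count
 * on every CPU makes that test fail, steering writers back to the ring
 * buffer while the pages are freed.
 */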
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) static struct trace_buffer *temp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) struct ring_buffer_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) struct trace_event_file *trace_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) int type, unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) struct ring_buffer_event *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) *current_rb = trace_file->tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) (entry = this_cpu_read(trace_buffered_event))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /* Try to use the per cpu buffer first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) val = this_cpu_inc_return(trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) trace_event_setup(entry, type, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) entry->array[0] = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) this_cpu_dec(trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) entry = __trace_buffer_lock_reserve(*current_rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) type, len, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * If tracing is off, but we have triggers enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) * we still need to look at the event data. Use the temp_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) * to store the trace event for the trigger to use. It's recursion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) * safe and will not be recorded anywhere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) *current_rb = temp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) entry = __trace_buffer_lock_reserve(*current_rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) type, len, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
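/*
 * Worked example (illustrative): the per-CPU buffered event is one page,
 * so the "len <" test above bounds the payload at
 * PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]). With 4 KiB pages,
 * the 4-byte ring_buffer_event header, and the 4-byte length slot in
 * array[0], that is 4096 - 4 - 4 = 4088 bytes; larger events always take
 * the regular ring-buffer path.
 */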
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) static DEFINE_SPINLOCK(tracepoint_iter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) static DEFINE_MUTEX(tracepoint_printk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static void output_printk(struct trace_event_buffer *fbuffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) struct trace_event_call *event_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) struct trace_event_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct trace_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) struct trace_iterator *iter = tracepoint_print_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) /* We should never get here if iter is NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (WARN_ON_ONCE(!iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) event_call = fbuffer->trace_file->event_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (!event_call || !event_call->event.funcs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) !event_call->event.funcs->trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) file = fbuffer->trace_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) !filter_match_preds(file->filter, fbuffer->entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) event = &fbuffer->trace_file->event_call->event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) spin_lock_irqsave(&tracepoint_iter_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) trace_seq_init(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) iter->ent = fbuffer->entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) event_call->event.funcs->trace(iter, 0, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) trace_seq_putc(&iter->seq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) printk("%s", iter->seq.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) int tracepoint_printk_sysctl(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) void *buffer, size_t *lenp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) int save_tracepoint_printk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) mutex_lock(&tracepoint_printk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) save_tracepoint_printk = tracepoint_printk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) ret = proc_dointvec(table, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * This will force an early exit, as tracepoint_printk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * is always zero when tracepoint_print_iter is not allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (!tracepoint_print_iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) tracepoint_printk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (save_tracepoint_printk == tracepoint_printk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (tracepoint_printk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) static_key_enable(&tracepoint_printk_key.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) static_key_disable(&tracepoint_printk_key.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) mutex_unlock(&tracepoint_printk_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
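/*
 * Usage sketch (illustrative): this knob is normally driven from
 * userspace, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 *
 * and can also be set at boot with the "tp_printk" kernel parameter,
 * which is what causes tracepoint_print_iter to be allocated in the
 * first place.
 */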
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (static_key_false(&tracepoint_printk_key.key))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) output_printk(fbuffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (static_branch_unlikely(&trace_event_exports_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) fbuffer->event, fbuffer->entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) fbuffer->flags, fbuffer->pc, fbuffer->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) * Skip 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * trace_buffer_unlock_commit_regs()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) * trace_event_buffer_commit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) * trace_event_raw_event_xxx()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) #define STACK_SKIP 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) void trace_buffer_unlock_commit_regs(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) unsigned long flags, int pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) * If regs is not set, then skip the necessary functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) * Note, we can still get here via blktrace, wakeup tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) * and mmiotrace, but that's ok if they lose a function or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) * two. They are not that meaningful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) ftrace_trace_userstack(tr, buffer, flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) struct ring_buffer_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) trace_function(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) unsigned long ip, unsigned long parent_ip, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) struct trace_event_call *call = &event_function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) struct trace_buffer *buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) struct ftrace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) entry->parent_ip = parent_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) if (!call_filter_check_discard(call, entry, buffer, event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) if (static_branch_unlikely(&trace_function_exports_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) ftrace_exports(event, TRACE_EXPORT_FUNCTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) }
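/*
 * Illustrative note: this is the low-level record helper behind the
 * "function" tracer; each invocation logs one TRACE_FN entry (the callee
 * ip plus its caller's parent_ip), which the output side later renders as
 * lines such as "func <-parent".
 */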
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) #define FTRACE_KSTACK_NESTING 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
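/*
 * Worked example (illustrative): with 4 KiB pages this is 4096 / 4 = 1024
 * saved addresses per context, so each per-CPU ftrace_stacks instance
 * below weighs in at 4 * 1024 * sizeof(unsigned long) = 32 KiB on a
 * 64-bit build.
 */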
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) struct ftrace_stack {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) unsigned long calls[FTRACE_KSTACK_ENTRIES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) struct ftrace_stacks {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static DEFINE_PER_CPU(int, ftrace_stack_reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) static void __ftrace_trace_stack(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) int skip, int pc, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct trace_event_call *call = &event_kernel_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) unsigned int size, nr_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) struct ftrace_stack *fstack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) struct stack_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) int stackidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) * Add one to account for this function and the call to stack_trace_save().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) * If regs is set, then these functions will not be in the way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) #ifndef CONFIG_UNWINDER_ORC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) skip++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) /* This should never happen. If it does, yell once and skip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) * The above __this_cpu_inc_return() is 'atomic' cpu local. An
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) * interrupt will either see the value pre increment or post
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) * increment. If the interrupt happens pre increment it will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) * restored the counter when it returns. We just need a barrier to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) * keep gcc from moving things around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) size = ARRAY_SIZE(fstack->calls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) nr_entries = stack_trace_save_regs(regs, fstack->calls,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) size, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) nr_entries = stack_trace_save(fstack->calls, size, skip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) size = nr_entries * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) (sizeof(*entry) - sizeof(entry->caller)) + size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) memcpy(&entry->caller, fstack->calls, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) entry->size = nr_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) if (!call_filter_check_discard(call, entry, buffer, event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /* Again, don't let gcc optimize things here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) __this_cpu_dec(ftrace_stack_reserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) static inline void ftrace_trace_stack(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) int skip, int pc, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) __ftrace_trace_stack(buffer, flags, skip, pc, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct trace_buffer *buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) if (rcu_is_watching()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) * but if the above rcu_is_watching() failed, then the NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) * triggered someplace critical, and rcu_irq_enter() should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) * not be called from NMI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (unlikely(in_nmi()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) rcu_irq_enter_irqson();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) rcu_irq_exit_irqson();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) * trace_dump_stack - record a stack back trace in the trace buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * @skip: Number of functions to skip (helper handlers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) void trace_dump_stack(int skip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (tracing_disabled || tracing_selftest_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) local_save_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) #ifndef CONFIG_UNWINDER_ORC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) /* Skip 1 to skip this function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) skip++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) __ftrace_trace_stack(global_trace.array_buffer.buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) flags, skip, preempt_count(), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) EXPORT_SYMBOL_GPL(trace_dump_stack);
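/*
 * Usage sketch (illustrative): trace_dump_stack(0) can be dropped into a
 * code path being debugged to record the current call chain into the
 * trace buffer (rather than dmesg), e.g.:
 *
 *	if (suspicious_condition)
 *		trace_dump_stack(0);
 *
 * where suspicious_condition stands in for whatever is being chased.
 */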
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) #ifdef CONFIG_USER_STACKTRACE_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) static DEFINE_PER_CPU(int, user_stack_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) ftrace_trace_userstack(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct trace_buffer *buffer, unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) struct trace_event_call *call = &event_user_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) struct userstack_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) * NMIs cannot handle page faults, even with fixups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) * Saving the user stack can (and often does) fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (unlikely(in_nmi()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) * prevent recursion, since the user stack tracing may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) * trigger other kernel events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (__this_cpu_read(user_stack_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) __this_cpu_inc(user_stack_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) sizeof(*entry), flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) goto out_drop_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) entry->tgid = current->tgid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) memset(&entry->caller, 0, sizeof(entry->caller));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (!call_filter_check_discard(call, entry, buffer, event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) out_drop_count:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) __this_cpu_dec(user_stack_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) #else /* CONFIG_USER_STACKTRACE_SUPPORT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) static void ftrace_trace_userstack(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) #endif /* CONFIG_STACKTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) /* created for use with alloc_percpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) struct trace_buffer_struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) int nesting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) char buffer[4][TRACE_BUF_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) static struct trace_buffer_struct __percpu *trace_percpu_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) * This allows for lockless recording. If we're nested too deeply, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) * this returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) static char *get_trace_buf(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) if (!trace_percpu_buffer || buffer->nesting >= ARRAY_SIZE(buffer->buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) buffer->nesting++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) /* Interrupts must see nesting incremented before we use the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) return &buffer->buffer[buffer->nesting - 1][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) static void put_trace_buf(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) /* Don't let the decrement of nesting leak before this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) this_cpu_dec(trace_percpu_buffer->nesting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
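/*
 * Usage sketch (illustrative): callers keep preemption disabled for the
 * whole get/put window so the buffer stays on this CPU, as the
 * trace_printk() paths below do:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *		...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */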
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) static int alloc_percpu_trace_buffer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct trace_buffer_struct __percpu *buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (trace_percpu_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) buffers = alloc_percpu(struct trace_buffer_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) trace_percpu_buffer = buffers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) static int buffers_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) void trace_printk_init_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (buffers_allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (alloc_percpu_trace_buffer())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) /* trace_printk() is for debug use only. Don't use it in production. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) pr_warn("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) pr_warn("**********************************************************\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) pr_warn("** This means that this is a DEBUG kernel and it is **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) pr_warn("** unsafe for production use. **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) pr_warn("** If you see this message and you are not debugging **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) pr_warn("** the kernel, report this immediately to your vendor! **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) pr_warn("** **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) pr_warn("**********************************************************\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) /* Expand the buffers to set size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) tracing_update_buffers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) buffers_allocated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.array_buffer.buffer is
	 * already allocated at this point, then this was called by
	 * module code.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (global_trace.array_buffer.buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) tracing_start_cmdline_record();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) void trace_printk_start_comm(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) /* Start tracing comms if trace printk is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (!buffers_allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) tracing_start_cmdline_record();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) static void trace_printk_start_stop_comm(int enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) if (!buffers_allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) tracing_start_cmdline_record();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) tracing_stop_cmdline_record();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) /**
 * trace_vbprintk - write a binary message to the tracing buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) * @ip: The address of the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) * @fmt: The string format to write to the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) * @args: Arguments for @fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct trace_event_call *call = &event_bprint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) struct bprint_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) char *tbuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) int len = 0, size, pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if (unlikely(tracing_selftest_running || tracing_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) /* Don't pollute graph traces with trace_vprintk internals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) pause_graph_tracing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) pc = preempt_count();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) tbuffer = get_trace_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) if (!tbuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) goto out_nobuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) local_save_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) size = sizeof(*entry) + sizeof(u32) * len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) ring_buffer_nest_start(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) entry->fmt = fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) memcpy(entry->buf, tbuffer, sizeof(u32) * len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (!call_filter_check_discard(call, entry, buffer, event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) ring_buffer_nest_end(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) put_trace_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) out_nobuffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) unpause_graph_tracing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) EXPORT_SYMBOL_GPL(trace_vbprintk);
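
/*
 * For orientation: a trace_printk() call whose format is a
 * compile-time constant is normally routed here via the
 * __trace_bprintk() wrapper, so only the format pointer plus the
 * binary-packed arguments are stored. A hedged sketch of a caller
 * (the variables are hypothetical):
 */
#if 0
	/* In some debug-only code path; ends up in trace_vbprintk(): */
	trace_printk("order=%d nr_pages=%lu\n", order, nr_pages);
#endif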
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) __printf(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) __trace_array_vprintk(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) unsigned long ip, const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) struct trace_event_call *call = &event_print;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) int len = 0, size, pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct print_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) char *tbuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (tracing_disabled || tracing_selftest_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) /* Don't pollute graph traces with trace_vprintk internals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) pause_graph_tracing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (!tbuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) goto out_nobuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) local_save_flags(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) size = sizeof(*entry) + len + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) ring_buffer_nest_start(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) memcpy(&entry->buf, tbuffer, len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (!call_filter_check_discard(call, entry, buffer, event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) __buffer_unlock_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) ring_buffer_nest_end(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) put_trace_buf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) out_nobuffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) unpause_graph_tracing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) __printf(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) int trace_array_vprintk(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) unsigned long ip, const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) * trace_array_printk - Print a message to a specific instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * @tr: The instance trace_array descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) * @ip: The instruction pointer that this is called from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) * @fmt: The format to print (printf format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) *
 * If a subsystem sets up its own instance, it has the right to
 * printk strings into its tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as the top level buffer
 * should only contain events that can be individually disabled.
 * trace_printk() is only for debugging a kernel, and should never
 * be incorporated into normal use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) * trace_array_printk() can be used, as it will not add noise to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) * top level tracing buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) * Note, trace_array_init_printk() must be called on @tr before this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) * can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) __printf(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) int trace_array_printk(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) unsigned long ip, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) va_list ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) /* This is only allowed for created instances */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (tr == &global_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (!(tr->trace_flags & TRACE_ITER_PRINTK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) va_start(ap, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) ret = trace_array_vprintk(tr, ip, fmt, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) va_end(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) EXPORT_SYMBOL_GPL(trace_array_printk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) * trace_array_init_printk - Initialize buffers for trace_array_printk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) * @tr: The trace array to initialize the buffers for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) *
 * As trace_array_printk() only writes into instances, it is OK to
 * have it in the kernel (unlike trace_printk()). This needs to be
 * called before trace_array_printk() can be used on a trace_array;
 * see the usage sketch below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) int trace_array_init_printk(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) /* This is only allowed for created instances */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) if (tr == &global_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) return alloc_percpu_trace_buffer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) EXPORT_SYMBOL_GPL(trace_array_init_printk);
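
/*
 * Usage sketch (illustrative only): a module that owns its own trace
 * instance could wire trace_array_init_printk() and
 * trace_array_printk() together roughly as below. The instance name
 * "my_subsys" and my_subsys_trace_init() are hypothetical.
 */
#if 0
static struct trace_array *my_tr;

static int my_subsys_trace_init(void)
{
	int ret;

	/* Create or look up a dedicated instance. */
	my_tr = trace_array_get_by_name("my_subsys");
	if (!my_tr)
		return -ENOMEM;

	/* Allocate the percpu buffers that trace_array_printk() needs. */
	ret = trace_array_init_printk(my_tr);
	if (ret)
		return ret;

	/* Writes only into the "my_subsys" instance, not the top level. */
	trace_array_printk(my_tr, _THIS_IP_, "instance ready\n");
	return 0;
}
#endif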
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) __printf(3, 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) int trace_array_printk_buf(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) unsigned long ip, const char *fmt, ...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) va_list ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) va_start(ap, fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) ret = __trace_array_vprintk(buffer, ip, fmt, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) va_end(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) __printf(2, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) return trace_array_vprintk(&global_trace, ip, fmt, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) EXPORT_SYMBOL_GPL(trace_vprintk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) static void trace_iterator_increment(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) iter->idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) if (buf_iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) ring_buffer_iter_advance(buf_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) static struct trace_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) unsigned long *lost_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (buf_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) event = ring_buffer_iter_peek(buf_iter, ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if (lost_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) *lost_events = ring_buffer_iter_dropped(buf_iter) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) (unsigned long)-1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) lost_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) iter->ent_size = ring_buffer_event_length(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) return ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) iter->ent_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) static struct trace_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) unsigned long *missing_events, u64 *ent_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) struct trace_buffer *buffer = iter->array_buffer->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) struct trace_entry *ent, *next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) unsigned long lost_events = 0, next_lost = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) int cpu_file = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) u64 next_ts = 0, ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) int next_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) int next_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all CPUs; peek directly at that one CPU.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (cpu_file > RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (ring_buffer_empty_cpu(buffer, cpu_file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (ent_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) *ent_cpu = cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) return ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) if (ring_buffer_empty_cpu(buffer, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) ent = peek_next_entry(iter, cpu, &ts, &lost_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) * Pick the entry with the smallest timestamp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (ent && (!next || ts < next_ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) next = ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) next_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) next_ts = ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) next_lost = lost_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) next_size = iter->ent_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) iter->ent_size = next_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (ent_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) *ent_cpu = next_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) if (ent_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) *ent_ts = next_ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) if (missing_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) *missing_events = next_lost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) #define STATIC_TEMP_BUF_SIZE 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) /* Find the next real entry, without updating the iterator itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) int *ent_cpu, u64 *ent_ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) /* __find_next_entry will reset ent_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) int ent_size = iter->ent_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) struct trace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
	/*
	 * If called from ftrace_dump(), then the iter->temp buffer
	 * will be the static_temp_buf and not created from kmalloc.
	 * If the entry size is greater than the buffer, we cannot
	 * save it. Just return NULL in that case. This is only
	 * used to add markers when two consecutive events' time
	 * stamps have a large delta. See trace_print_lat_context().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) if (iter->temp == static_temp_buf &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) STATIC_TEMP_BUF_SIZE < ent_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
	/*
	 * __find_next_entry() may call peek_next_entry(), which may
	 * call ring_buffer_peek(), which can make the contents of
	 * iter->ent undefined. Copy iter->ent now.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (iter->ent && iter->ent != iter->temp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if ((!iter->temp || iter->temp_size < iter->ent_size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) void *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) temp = kmalloc(iter->ent_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) if (!temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) kfree(iter->temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) iter->temp = temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) iter->temp_size = iter->ent_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) memcpy(iter->temp, iter->ent, iter->ent_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) iter->ent = iter->temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) /* Put back the original ent_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) iter->ent_size = ent_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) /* Find the next real entry, and increment the iterator to the next entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) void *trace_find_next_entry_inc(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) iter->ent = __find_next_entry(iter, &iter->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) &iter->lost_events, &iter->ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (iter->ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) trace_iterator_increment(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) return iter->ent ? iter : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) static void trace_consume(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) &iter->lost_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) }
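
/*
 * For orientation: trace_find_next_entry_inc() and trace_consume()
 * are paired by consuming readers such as ftrace_dump() (later in
 * this file), roughly like:
 *
 *	while (trace_find_next_entry_inc(&iter)) {
 *		ret = print_trace_line(&iter);
 *		if (ret != TRACE_TYPE_NO_CONSUME)
 *			trace_consume(&iter);
 *	}
 */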
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) static void *s_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) struct trace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) int i = (int)*pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) void *ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) WARN_ON_ONCE(iter->leftover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) (*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) /* can't go backwards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) if (iter->idx > i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) if (iter->idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) ent = trace_find_next_entry_inc(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) ent = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) while (ent && iter->idx < i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) ent = trace_find_next_entry_inc(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) iter->pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) return ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) void tracing_iter_reset(struct trace_iterator *iter, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) struct ring_buffer_iter *buf_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) unsigned long entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) u64 ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) buf_iter = trace_buffer_iter(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (!buf_iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) ring_buffer_iter_reset(buf_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a CPU. This is evident when the timestamp is
	 * before the start of the buffer.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) while (ring_buffer_iter_peek(buf_iter, &ts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (ts >= iter->array_buffer->time_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) ring_buffer_iter_advance(buf_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
/*
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) static void *s_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) struct trace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) int cpu_file = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) void *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) loff_t l = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace; the name pointer
	 * may be compared instead of using strcmp(), as
	 * iter->trace->name will point to the same string as
	 * current_trace->name.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) *iter->trace = *tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) if (iter->snapshot && iter->trace->use_max_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) if (*pos != iter->pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) iter->ent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) iter->cpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) iter->idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (cpu_file == RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) for_each_tracing_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) tracing_iter_reset(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) tracing_iter_reset(iter, cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) iter->leftover = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) for (p = iter; p && l < *pos; p = s_next(m, p, &l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) * If we overflowed the seq_file before, then we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) * to just reuse the trace_seq buffer again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) if (iter->leftover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) p = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) l = *pos - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) p = s_next(m, p, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) trace_event_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) trace_access_lock(cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) static void s_stop(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) struct trace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) if (iter->snapshot && iter->trace->use_max_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) trace_access_unlock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) trace_event_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) }
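
/*
 * For orientation: s_start(), s_next() and s_stop() above are the
 * seq_file callbacks behind reads of the "trace" file; together with
 * a show callback they are wired up along the lines of the
 * tracer_seq_ops table defined later in this file:
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 */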
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) unsigned long *entries, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) count = ring_buffer_entries_cpu(buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) * If this buffer has skipped entries, then we hold all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) * entries for the trace and we need to ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) * ones before the time stamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) /* total is the same as the entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) *total = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) *total = count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) ring_buffer_overrun_cpu(buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) *entries = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) get_total_entries(struct array_buffer *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) unsigned long *total, unsigned long *entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) unsigned long t, e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) *total = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) *entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) get_total_entries_cpu(buf, &t, &e, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) *total += t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) *entries += e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) unsigned long total, entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) return entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) unsigned long trace_total_entries(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) unsigned long total, entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) get_total_entries(&tr->array_buffer, &total, &entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) static void print_lat_help_header(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) seq_puts(m, "# _------=> CPU# \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) "# / _-----=> irqs-off \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) "# | / _----=> need-resched \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) "# || / _---=> hardirq/softirq \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) "# ||| / _--=> preempt-depth \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) "# |||| / delay \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) "# cmd pid ||||| time | caller \n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) "# \\ / ||||| \\ | / \n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) static void print_event_info(struct array_buffer *buf, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) unsigned long total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) unsigned long entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) get_total_entries(buf, &total, &entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) entries, total, num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) seq_puts(m, "#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) bool tgid = flags & TRACE_ITER_RECORD_TGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) print_event_info(buf, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) bool tgid = flags & TRACE_ITER_RECORD_TGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) const char *space = " ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) int prec = tgid ? 12 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) print_event_info(buf, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) seq_printf(m, "# %.*s||| / delay\n", prec, space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) print_trace_header(struct seq_file *m, struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) struct array_buffer *buf = iter->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) struct tracer *type = iter->trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) unsigned long entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) unsigned long total;
	const char *name = type->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) get_total_entries(buf, &total, &entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) name, UTS_RELEASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) seq_puts(m, "# -----------------------------------"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) "---------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) nsecs_to_usecs(data->saved_latency),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) total,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) buf->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) #if defined(CONFIG_PREEMPT_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) "server",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) #elif defined(CONFIG_PREEMPT_VOLUNTARY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) "desktop",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) #elif defined(CONFIG_PREEMPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) "preempt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) #elif defined(CONFIG_PREEMPT_RT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) "preempt_rt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) "unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) /* These are reserved for later use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 0, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) seq_printf(m, " #P:%d)\n", num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) seq_puts(m, ")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) seq_puts(m, "# -----------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) seq_printf(m, "# | task: %.16s-%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) data->comm, data->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) data->policy, data->rt_priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) seq_puts(m, "# -----------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) if (data->critical_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) seq_puts(m, "# => started at: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) trace_print_seq(m, &iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) seq_puts(m, "\n# => ended at: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) trace_print_seq(m, &iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) seq_puts(m, "\n#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) seq_puts(m, "#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) static void test_cpu_buff_start(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (cpumask_available(iter->started) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) cpumask_test_cpu(iter->cpu, iter->started))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) if (cpumask_available(iter->started))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) cpumask_set_cpu(iter->cpu, iter->started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959)
	/* Don't print the "buffer started" message for the first entry of the trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (iter->idx > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) trace_seq_printf(s, "##### CPU %u buffer started ####\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) iter->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) struct trace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) struct trace_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) entry = iter->ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) test_cpu_buff_start(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) event = ftrace_find_event(entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (iter->iter_flags & TRACE_FILE_LAT_FMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) trace_print_lat_context(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) trace_print_context(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) if (trace_seq_has_overflowed(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) return TRACE_TYPE_PARTIAL_LINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) if (event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) return event->funcs->trace(iter, sym_flags, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) trace_seq_printf(s, "Unknown type %d\n", entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) return trace_handle_return(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) struct trace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) struct trace_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) entry = iter->ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) trace_seq_printf(s, "%d %d %llu ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) entry->pid, iter->cpu, iter->ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) if (trace_seq_has_overflowed(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) return TRACE_TYPE_PARTIAL_LINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) event = ftrace_find_event(entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) if (event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) return event->funcs->raw(iter, 0, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) trace_seq_printf(s, "%d ?\n", entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) return trace_handle_return(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022)
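/*
 * "hex" output format: pid, cpu and timestamp emitted as hex fields
 * (when context info is enabled), then the event's hex() callback,
 * with a newline appended once the event has been handled.
 */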
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) unsigned char newline = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) struct trace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) struct trace_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) entry = iter->ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) SEQ_PUT_HEX_FIELD(s, entry->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) SEQ_PUT_HEX_FIELD(s, iter->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) SEQ_PUT_HEX_FIELD(s, iter->ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) if (trace_seq_has_overflowed(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) return TRACE_TYPE_PARTIAL_LINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) event = ftrace_find_event(entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) if (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) enum print_line_t ret = event->funcs->hex(iter, 0, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) if (ret != TRACE_TYPE_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) SEQ_PUT_FIELD(s, newline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) return trace_handle_return(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
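/*
 * "bin" output format: pid, cpu and timestamp copied out as raw binary
 * fields (when context info is enabled), then the event's binary()
 * callback. Entries with no registered event are treated as handled.
 */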
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) struct trace_seq *s = &iter->seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) struct trace_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) struct trace_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) entry = iter->ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) SEQ_PUT_FIELD(s, entry->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) SEQ_PUT_FIELD(s, iter->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) SEQ_PUT_FIELD(s, iter->ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) if (trace_seq_has_overflowed(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) return TRACE_TYPE_PARTIAL_LINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) event = ftrace_find_event(entry->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) return event ? event->funcs->binary(iter, 0, event) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) TRACE_TYPE_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
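/*
 * Return 1 if the iterator has nothing left to read, 0 otherwise.
 * Uses the per-CPU buffer iterator when one exists, and asks the
 * ring buffer directly otherwise.
 */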
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) int trace_empty(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) struct ring_buffer_iter *buf_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /* If we are looking at one CPU buffer, only check that one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) cpu = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) buf_iter = trace_buffer_iter(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) if (buf_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) if (!ring_buffer_iter_empty(buf_iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) buf_iter = trace_buffer_iter(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) if (buf_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) if (!ring_buffer_iter_empty(buf_iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) /* Called with trace_event_read_lock() held. */
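/*
 * Output is dispatched in priority order: lost-event annotations first,
 * then the tracer's own print_line() hook, the printk msg-only
 * shortcuts, the bin/hex/raw iterator flags, and finally the default
 * text format.
 */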
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) enum print_line_t print_trace_line(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) unsigned long trace_flags = tr->trace_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) enum print_line_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) if (iter->lost_events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) if (iter->lost_events == (unsigned long)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) iter->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) iter->cpu, iter->lost_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) if (trace_seq_has_overflowed(&iter->seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) return TRACE_TYPE_PARTIAL_LINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (iter->trace && iter->trace->print_line) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) ret = iter->trace->print_line(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (ret != TRACE_TYPE_UNHANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) if (iter->ent->type == TRACE_BPUTS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) trace_flags & TRACE_ITER_PRINTK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) trace_flags & TRACE_ITER_PRINTK_MSGONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) return trace_print_bputs_msg_only(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (iter->ent->type == TRACE_BPRINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) trace_flags & TRACE_ITER_PRINTK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) trace_flags & TRACE_ITER_PRINTK_MSGONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) return trace_print_bprintk_msg_only(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) if (iter->ent->type == TRACE_PRINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) trace_flags & TRACE_ITER_PRINTK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) trace_flags & TRACE_ITER_PRINTK_MSGONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) return trace_print_printk_msg_only(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) if (trace_flags & TRACE_ITER_BIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) return print_bin_fmt(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) if (trace_flags & TRACE_ITER_HEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) return print_hex_fmt(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) if (trace_flags & TRACE_ITER_RAW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) return print_raw_fmt(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) return print_trace_fmt(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
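/*
 * Print the latency-format header: the full trace header when the file
 * is in latency format, plus the short help header unless verbose
 * output was requested. Nothing is printed for empty buffers.
 */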
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) void trace_latency_header(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) struct trace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) /* print nothing if the buffers are empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) if (trace_empty(iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (iter->iter_flags & TRACE_FILE_LAT_FMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) print_trace_header(m, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) print_lat_help_header(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) void trace_default_header(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) struct trace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) unsigned long trace_flags = tr->trace_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) /* print nothing if the buffers are empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) if (trace_empty(iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) print_trace_header(m, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) if (!(trace_flags & TRACE_ITER_VERBOSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) print_lat_help_header(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if (!(trace_flags & TRACE_ITER_VERBOSE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) if (trace_flags & TRACE_ITER_IRQ_INFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) print_func_help_header_irq(iter->array_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) m, trace_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) print_func_help_header(iter->array_buffer, m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) trace_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202)
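/*
 * If ftrace has shut itself down due to an internal anomaly
 * (ftrace_is_dead()), warn in the output that function events
 * may be missing.
 */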
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) static void test_ftrace_alive(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) if (!ftrace_is_dead())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) "# MAY BE MISSING FUNCTION EVENTS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) static void show_snapshot_main_help(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) "# Takes a snapshot of the main buffer.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) "# (Doesn't have to be '2'; it works with any number\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) "# that is not a '0' or '1')\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) static void show_snapshot_percpu_help(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) "# Takes a snapshot of the main buffer for this cpu.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) "# Must use main snapshot file to allocate.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) "# (Doesn't have to be '2'; it works with any number\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) "# that is not a '0' or '1')\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) if (iter->tr->allocated_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) seq_puts(m, "# Snapshot commands:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) show_snapshot_main_help(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) show_snapshot_percpu_help(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) /* Should never be called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254)
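/*
 * seq_file show(): print headers when there is no current entry, flush
 * leftover output if the seq buffer overflowed on a previous call, or
 * format the current entry and record any new overflow in ->leftover.
 */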
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) static int s_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) struct trace_iterator *iter = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) if (iter->ent == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) if (iter->tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) seq_printf(m, "# tracer: %s\n", iter->trace->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) seq_puts(m, "#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) test_ftrace_alive(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) if (iter->snapshot && trace_empty(iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) print_snapshot_help(m, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) else if (iter->trace && iter->trace->print_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) iter->trace->print_header(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) trace_default_header(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) } else if (iter->leftover) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) * If we filled the seq_file buffer earlier, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) * want to just show it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) ret = trace_print_seq(m, &iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) /* ret should this time be zero, but you never know */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) iter->leftover = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) print_trace_line(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) ret = trace_print_seq(m, &iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) * If we overflow the seq_file buffer, then it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) * ask us for this data again at start up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) * Use that instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) * ret is 0 if seq_file write succeeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) * -1 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) iter->leftover = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) * Should be used after trace_array_get(); trace_types_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) * ensures that i_cdev was already initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) static inline int tracing_get_cpu(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) if (inode->i_cdev) /* See trace_create_cpu_file() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) return (long)inode->i_cdev - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) return RING_BUFFER_ALL_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) static const struct seq_operations tracer_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) .start = s_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) .next = s_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) .stop = s_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) .show = s_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316)
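/*
 * Build the trace_iterator used to read a trace file: allocate the
 * iterator and its per-CPU buffer iterators, copy the current tracer,
 * optionally stop tracing (pause-on-trace), and prime the ring buffer
 * readers, all under trace_types_lock.
 */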
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) static struct trace_iterator *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) __tracing_open(struct inode *inode, struct file *file, bool snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) struct trace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) if (tracing_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) if (!iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) if (!iter->buffer_iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) goto release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) * trace_find_next_entry() may need to save off iter->ent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) * It will place it into the iter->temp buffer. As most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) * events are less than 128 bytes, allocate a buffer of that size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) * If one is greater, then trace_find_next_entry() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) * allocate a new buffer to adjust for the bigger iter->ent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) * It's not critical if it fails to get allocated here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) iter->temp = kmalloc(128, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) if (iter->temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) iter->temp_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) * We make a copy of the current tracer to avoid concurrent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) * changes to it while we are reading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) if (!iter->trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) *iter->trace = *tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) iter->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) /* Currently only the top directory has a snapshot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) if (tr->current_trace->print_max || snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) iter->array_buffer = &tr->max_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) iter->array_buffer = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) iter->snapshot = snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) iter->pos = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) iter->cpu_file = tracing_get_cpu(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) mutex_init(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) /* Notify the tracer early, before we stop tracing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if (iter->trace->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) iter->trace->open(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) /* Annotate start of buffers if we had overruns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) if (ring_buffer_overruns(iter->array_buffer->buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) iter->iter_flags |= TRACE_FILE_ANNOTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) /* Output in nanoseconds only if we are using a clock in nanoseconds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) if (trace_clocks[tr->clock_id].in_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * If pause-on-trace is enabled, then stop the trace while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) * dumping, unless this is the "snapshot" file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) tracing_stop_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) iter->buffer_iter[cpu] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) ring_buffer_read_prepare(iter->array_buffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) cpu, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) ring_buffer_read_prepare_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) ring_buffer_read_start(iter->buffer_iter[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) tracing_iter_reset(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) cpu = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) iter->buffer_iter[cpu] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) ring_buffer_read_prepare(iter->array_buffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) cpu, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) ring_buffer_read_prepare_sync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) ring_buffer_read_start(iter->buffer_iter[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) tracing_iter_reset(iter, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) kfree(iter->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) kfree(iter->temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) kfree(iter->buffer_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) seq_release_private(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
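/*
 * Generic open for files that take no trace_array reference: run the
 * usual security and liveness checks (tracing_check_open_get_tr(NULL))
 * and stash i_private for the other file handlers.
 */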
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) int tracing_open_generic(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) ret = tracing_check_open_get_tr(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) filp->private_data = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) bool tracing_is_disabled(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) return tracing_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) * Open and update trace_array ref count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) * Must have the current trace_array passed to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) int tracing_open_generic_tr(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) filp->private_data = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) static int tracing_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) struct trace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) if (!(file->f_mode & FMODE_READ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) /* Writes do not use seq_file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) if (iter->buffer_iter[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) ring_buffer_read_finish(iter->buffer_iter[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) if (iter->trace && iter->trace->close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) iter->trace->close(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) if (!iter->snapshot && tr->stop_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) /* reenable tracing if it was previously enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) tracing_start_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) __trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) mutex_destroy(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) free_cpumask_var(iter->started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) kfree(iter->temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) kfree(iter->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) kfree(iter->buffer_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) seq_release_private(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) static int tracing_release_generic_tr(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) static int tracing_single_release_tr(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) return single_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523)
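/*
 * open() handler behind tracing_fops: an O_TRUNC write-open erases the
 * buffer(s) first; a read-open builds the seq_file iterator and applies
 * the latency format flag when it is set.
 */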
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) static int tracing_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) struct trace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) /* If this file was open for write, then erase contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) int cpu = tracing_get_cpu(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) struct array_buffer *trace_buf = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) if (tr->current_trace->print_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) trace_buf = &tr->max_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) if (cpu == RING_BUFFER_ALL_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) tracing_reset_online_cpus(trace_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) tracing_reset_cpu(trace_buf, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) iter = __tracing_open(inode, file, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) if (IS_ERR(iter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) ret = PTR_ERR(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) iter->iter_flags |= TRACE_FILE_LAT_FMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) * Some tracers are not suitable for instance buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) * A tracer is always available for the global (top-level) array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) * and for an instance only if it explicitly allows instances.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) trace_ok_for_array(struct tracer *t, struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) /* Find the next tracer that this trace array may use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) static struct tracer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) get_tracer_for_array(struct trace_array *tr, struct tracer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) while (t && !trace_ok_for_array(t, tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) t = t->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584)
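/*
 * seq_file iteration over the registered tracers (see show_traces_fops
 * below), skipping tracers that this trace array may not use.
 */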
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) t_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) struct tracer *t = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) (*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) if (t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) t = get_tracer_for_array(tr, t->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) static void *t_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) struct tracer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) loff_t l = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) t = get_tracer_for_array(tr, trace_types);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) for (; t && l < *pos; t = t_next(m, t, &l))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) return t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) static void t_stop(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) static int t_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) struct tracer *t = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) if (!t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) seq_puts(m, t->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) if (t->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) seq_putc(m, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) static const struct seq_operations show_traces_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) .start = t_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) .next = t_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) .stop = t_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) .show = t_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) static int show_traces_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) struct seq_file *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) ret = seq_open(file, &show_traces_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) m->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) static int show_traces_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) return seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) tracing_write_stub(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678)
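/*
 * lseek for trace files: reads go through seq_lseek(); write-only opens
 * have no seq_file, so simply reset the position to zero.
 */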
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) if (file->f_mode & FMODE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) ret = seq_lseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) file->f_pos = ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) static const struct file_operations tracing_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) .open = tracing_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) .write = tracing_write_stub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) .llseek = tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) .release = tracing_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) static const struct file_operations show_traces_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) .open = show_traces_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) .release = show_traces_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705)
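/*
 * Read handler for tracing_cpumask: a first snprintf(NULL, 0, ...)
 * computes the formatted length so the buffer can be sized exactly,
 * then the mask is formatted and copied out to user space.
 */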
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) tracing_cpumask_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) struct trace_array *tr = file_inode(filp)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) char *mask_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) len = snprintf(NULL, 0, "%*pb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) cpumask_pr_args(tr->tracing_cpumask)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) mask_str = kmalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) if (!mask_str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) len = snprintf(mask_str, len, "%*pb\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) cpumask_pr_args(tr->tracing_cpumask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) if (len >= count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) count = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) kfree(mask_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733)
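/*
 * Apply a new tracing cpumask: under max_lock with IRQs off, disable
 * recording on CPUs leaving the mask and re-enable it on CPUs entering
 * it, then commit the new mask.
 */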
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) int tracing_set_cpumask(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) cpumask_var_t tracing_cpumask_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) * Increase/decrease the disabled counter if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) * about to flip a bit in the cpumask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) cpumask_test_cpu(cpu, tracing_cpumask_new)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) tracing_cpumask_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) struct trace_array *tr = file_inode(filp)->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) cpumask_var_t tracing_cpumask_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) err = tracing_set_cpumask(tr, tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) free_cpumask_var(tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) free_cpumask_var(tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) static const struct file_operations tracing_cpumask_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) .open = tracing_open_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) .read = tracing_cpumask_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) .write = tracing_cpumask_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) .release = tracing_release_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)
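/*
 * Show handler for trace_options: list the global trace flags first,
 * then the current tracer's private flags, each printed with a "no"
 * prefix when the option is cleared.
 */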
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) static int tracing_trace_options_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) struct tracer_opt *trace_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) u32 tracer_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) tracer_flags = tr->current_trace->flags->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) trace_opts = tr->current_trace->flags->opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) for (i = 0; trace_options[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) if (tr->trace_flags & (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) seq_printf(m, "%s\n", trace_options[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) seq_printf(m, "no%s\n", trace_options[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) for (i = 0; trace_opts[i].name; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) if (tracer_flags & trace_opts[i].bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) seq_printf(m, "%s\n", trace_opts[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) seq_printf(m, "no%s\n", trace_opts[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833)
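/*
 * Flip a single tracer-private flag bit: the tracer may veto the change
 * via its set_flag() callback before the cached value is updated.
 */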
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) static int __set_tracer_option(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) struct tracer_flags *tracer_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) struct tracer_opt *opts, int neg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) struct tracer *trace = tracer_flags->trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) if (neg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) tracer_flags->val &= ~opts->bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) tracer_flags->val |= opts->bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) /* Try to assign a tracer-specific option */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) struct tracer *trace = tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) struct tracer_flags *tracer_flags = trace->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) struct tracer_opt *opts = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) for (i = 0; tracer_flags->opts[i].name; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) opts = &tracer_flags->opts[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) if (strcmp(cmp, opts->name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) return __set_tracer_option(tr, trace->flags, opts, neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) }
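/*
 * Illustrative use of the tracer-specific path (a sketch): with the
 * function_graph tracer selected, "funcgraph-proc" is not in the global
 * trace_options table, so a write like the one below misses the
 * match_string() lookup in trace_set_options() and is resolved here
 * instead:
 *
 *   # echo function_graph > current_tracer
 *   # echo funcgraph-proc > trace_options
 */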
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) /* Some tracers require overwrite to stay enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) int *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) if ((mask == TRACE_ITER_RECORD_TGID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) (mask == TRACE_ITER_RECORD_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) lockdep_assert_held(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) /* do nothing if flag is already set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) if (!!(tr->trace_flags & mask) == !!enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) /* Give the tracer a chance to approve the change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) if (tr->current_trace->flag_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) if (tr->current_trace->flag_changed(tr, mask, !!enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) if (enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) tr->trace_flags |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) tr->trace_flags &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) if (mask == TRACE_ITER_RECORD_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) trace_event_enable_cmd_record(enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) if (mask == TRACE_ITER_RECORD_TGID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) if (!tgid_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) tgid_map_max = pid_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) * Pairs with smp_load_acquire() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) * trace_find_tgid_ptr() to ensure that if it observes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) * the tgid_map we just allocated then it also observes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) * the corresponding tgid_map_max value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) smp_store_release(&tgid_map, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) if (!tgid_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) trace_event_enable_tgid_record(enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) if (mask == TRACE_ITER_EVENT_FORK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) trace_event_follow_fork(tr, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) if (mask == TRACE_ITER_FUNC_FORK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) ftrace_pid_follow_fork(tr, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) if (mask == TRACE_ITER_OVERWRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) if (mask == TRACE_ITER_PRINTK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) trace_printk_start_stop_comm(enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) trace_printk_control(enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) }
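/*
 * Illustrative trigger for the TRACE_ITER_RECORD_TGID branch above
 * (a sketch): the first enable lazily allocates tgid_map.
 *
 *   # echo 1 > options/record-tgid
 */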
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) int trace_set_options(struct trace_array *tr, char *option)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) char *cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) int neg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) size_t orig_len = strlen(option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) cmp = strstrip(option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) len = str_has_prefix(cmp, "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) neg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) cmp += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) ret = match_string(trace_options, -1, cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) /* If no option could be set, test the specific tracer options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) ret = set_tracer_option(tr, cmp, neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) ret = set_tracer_flag(tr, 1 << ret, !neg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) * If the first trailing whitespace is replaced with '\0' by strstrip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) * turn it back into a space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) if (orig_len > strlen(option))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) option[strlen(option)] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) }
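/*
 * Illustrative usage of the "no" prefix parsed above (a sketch): the same
 * option name enables or disables a flag depending on the prefix.
 *
 *   # echo print-parent > trace_options
 *   # echo noprint-parent > trace_options
 */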
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) static void __init apply_trace_boot_options(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) char *buf = trace_boot_options_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) char *option;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) option = strsep(&buf, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) if (!option)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) if (*option)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) trace_set_options(&global_trace, option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) /* Put back the comma to allow this to be called again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) if (buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) *(buf - 1) = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) }
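/*
 * Illustrative boot usage (a sketch): trace_boot_options_buf is filled
 * from the "trace_options=" kernel command line parameter, e.g.
 *
 *   trace_options=sym-offset,noprint-parent
 *
 * and each comma-separated token is applied to global_trace in turn.
 */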
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) tracing_trace_options_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) struct seq_file *m = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) if (cnt >= sizeof(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) if (copy_from_user(buf, ubuf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) buf[cnt] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) ret = trace_set_options(tr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) *ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) static int tracing_trace_options_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) ret = single_open(file, tracing_trace_options_show, inode->i_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) static const struct file_operations tracing_iter_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) .open = tracing_trace_options_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) .release = tracing_single_release_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) .write = tracing_trace_options_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) static const char readme_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) "tracing mini-HOWTO:\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) "# echo 0 > tracing_on : quick way to disable tracing\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) " Important files:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) " trace\t\t\t- The static contents of the buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) "\t\t\t To clear the buffer, write into this file: echo > trace\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) " current_tracer\t- function and latency tracers\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) " available_tracers\t- list of configured tracers for current_tracer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) " error_log\t- error log for failed commands (that support it)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) " buffer_size_kb\t- view and modify size of per cpu buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) " buffer_total_size_kb - view total size of all cpu buffers\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) " trace_clock\t\t- change the clock used to order events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) " local: Per cpu clock but may not be synced across CPUs\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) " global: Synced across CPUs but slows tracing down.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) " counter: Not a clock, but just an increment\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) " uptime: Jiffy counter from time of boot\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) " perf: Same clock that perf events use\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) " x86-tsc: TSC cycle counter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) "\n timestamp_mode\t- view the mode used to timestamp events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) " delta: Delta difference against a buffer-wide timestamp\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) " absolute: Absolute (standalone) timestamp\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) "\n trace_marker\t\t- Writes to this file are inserted into the kernel buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) "\n trace_marker_raw\t\t- Writes to this file insert binary data into the kernel buffer\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) " tracing_cpumask\t- Limit which CPUs to trace\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) "\t\t\t Remove sub-buffer with rmdir\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) " trace_options\t\t- Set format or modify how tracing happens\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) "\t\t\t Disable an option by prefixing 'no' to the\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) "\t\t\t option name\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) " saved_cmdlines_size\t- echo the number of comm-pid entries to cache into this file\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) "\n available_filter_functions - list of functions that can be filtered on\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) " set_ftrace_filter\t- echo function name in here to only trace these\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) "\t\t\t functions\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) "\t accepts: func_full_name or glob-matching-pattern\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) "\t modules: Can select a group via module\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) "\t Format: :mod:<module-name>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) "\t example: echo :mod:ext3 > set_ftrace_filter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) "\t triggers: a command to perform when function is hit\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) "\t Format: <function>:<trigger>[:count]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) "\t trigger: traceon, traceoff\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) "\t\t enable_event:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) "\t\t disable_event:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) "\t\t stacktrace\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) "\t\t snapshot\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) "\t\t dump\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) "\t\t cpudump\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) "\t The first one will disable tracing every time do_fault is hit\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) "\t The second will disable tracing at most 3 times when do_trap is hit\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) "\t The first time do_trap is hit and it disables tracing, the\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) "\t counter will decrement to 2. If tracing is already disabled,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) "\t the counter will not decrement. It only decrements when the\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) "\t trigger actually did its work\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) "\t To remove a trigger without a count:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) "\t To remove a trigger with a count:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) " set_ftrace_notrace\t- echo function name in here to never trace.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) "\t modules: Can select a group via module command :mod:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) "\t Does not accept triggers\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) #ifdef CONFIG_FUNCTION_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) "\t\t (function)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) "\t\t (function)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) "\t\t\t snapshot buffer. Read the contents for more\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) "\t\t\t information\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) #ifdef CONFIG_STACK_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) " stack_trace\t\t- Shows the max stack trace when active\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) " stack_max_size\t- Shows current max stack size that was traced\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) "\t\t\t Write into this file to reset the max size (trigger a\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) "\t\t\t new trace)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) "\t\t\t traces\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) #endif /* CONFIG_STACK_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) #ifdef CONFIG_DYNAMIC_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) "\t\t\t Write into this file to define/undefine new trace events.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) #ifdef CONFIG_KPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) "\t\t\t Write into this file to define/undefine new trace events.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) #ifdef CONFIG_UPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) "\t\t\t Write into this file to define/undefine new trace events.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) "\t accepts: event-definitions (one definition per line)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) "\t s:[synthetic/]<event> <field> [<field>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) "\t -:[<group>/]<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) #ifdef CONFIG_KPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) #ifdef CONFIG_UPROBE_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) "\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) "\t args: <name>=fetcharg[:type]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) "\t $stack<index>, $stack, $retval, $comm,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) "\t <type>\\[<array-size>\\]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) "\t field: <stype> <name>;\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) "\t [unsigned] char/int/long\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) " events/\t\t- Directory containing all trace event subsystems:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) " events/<system>/\t- Directory containing all trace events for <system>:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) "\t\t\t events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) " filter\t\t- If set, only events passing filter are traced\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) " events/<system>/<event>/\t- Directory containing control files for\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) "\t\t\t <event>:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) " filter\t\t- If set, only events passing filter are traced\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) " trigger\t\t- If set, a command to perform when event is hit\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) "\t Format: <trigger>[:count][if <filter>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) "\t trigger: traceon, traceoff\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) "\t enable_event:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) "\t disable_event:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) "\t enable_hist:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) "\t disable_hist:<system>:<event>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) "\t\t stacktrace\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) "\t\t snapshot\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) "\t\t hist (see below)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) "\t example: echo traceoff > events/block/block_unplug/trigger\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) "\t events/block/block_unplug/trigger\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) "\t The first disables tracing every time block_unplug is hit.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) "\t The second disables tracing the first 3 times block_unplug is hit.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) "\t The third enables the kmalloc event the first 3 times block_unplug\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) "\t is hit and the 'nr_rq' event field has a value greater than 1.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) "\t Like function triggers, the counter is only decremented if it\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) "\t enabled or disabled tracing.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) "\t To remove a trigger without a count:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) "\t echo '!<trigger>' > <system>/<event>/trigger\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) "\t To remove a trigger with a count:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) "\t The filter, if any, can be omitted when removing a trigger.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) " hist trigger\t- If set, event hits are aggregated into a hash table\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) "\t Format: hist:keys=<field1[,field2,...]>\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) "\t [:values=<field1[,field2,...]>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) "\t [:sort=<field1[,field2,...]>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) "\t [:size=#entries]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) "\t [:pause][:continue][:clear]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) "\t [:name=histname1]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) "\t [:<handler>.<action>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) "\t [if <filter>]\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) "\t Note, special fields can be used as well:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) "\t common_timestamp - to record current timestamp\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) "\t common_cpu - to record the CPU the event happened on\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) "\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) "\t When a matching event is hit, an entry is added to a hash\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) "\t table using the key(s) and value(s) named, and the value of a\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) "\t sum called 'hitcount' is incremented. Keys and values\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) "\t correspond to fields in the event's format description. Keys\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) "\t can be any field, or the special string 'stacktrace'.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) "\t Compound keys consisting of up to two fields can be specified\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) "\t by the 'keys' keyword. Values must correspond to numeric\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) "\t fields. Sort keys consisting of up to two fields can be\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) "\t specified using the 'sort' keyword. The sort direction can\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) "\t be modified by appending '.descending' or '.ascending' to a\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) "\t sort field. The 'size' parameter can be used to specify more\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) "\t or fewer than the default 2048 entries for the hashtable size.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) "\t If a hist trigger is given a name using the 'name' parameter,\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) "\t its histogram data will be shared with other triggers of the\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) "\t same name, and trigger hits will update this common data.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) "\t Reading the 'hist' file for the event will dump the hash\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) "\t table in its entirety to stdout. If there are multiple hist\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) "\t triggers attached to an event, there will be a table for each\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) "\t trigger in the output. The table displayed for a named\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) "\t trigger will be the same as any other instance having the\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) "\t same name. The default format used to display a given field\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) "\t can be modified by appending any of the following modifiers\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) "\t to the field name, as applicable:\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) "\t .hex display a number as a hex value\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) "\t .sym display an address as a symbol\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) "\t .sym-offset display an address as a symbol and offset\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) "\t .execname display a common_pid as a program name\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) "\t .syscall display a syscall id as a syscall name\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) "\t .log2 display log2 value rather than raw number\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) "\t .usecs display a common_timestamp in microseconds\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) "\t The 'pause' parameter can be used to pause an existing hist\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) "\t trigger or to start a hist trigger but not log any events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) "\t until told to do so. 'continue' can be used to start or\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) "\t restart a paused hist trigger.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) "\t The 'clear' parameter will clear the contents of a running\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) "\t hist trigger and leave its current paused/active state\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) "\t unchanged.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) "\t The enable_hist and disable_hist triggers can be used to\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) "\t have one event conditionally start and stop another event's\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) "\t already-attached hist trigger. The syntax is analogous to\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) "\t the enable_event and disable_event triggers.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) "\t Hist trigger handlers and actions are executed whenever a\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) "\t histogram entry is added or updated. They take the form:\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) "\t <handler>.<action>\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) "\t The available handlers are:\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) "\t onmatch(matching.event) - invoke on addition or update\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) "\t onmax(var) - invoke if var exceeds current max\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) "\t onchange(var) - invoke action if var changes\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) "\t The available actions are:\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) "\t save(field,...) - save current event fields\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) "\t snapshot() - snapshot the trace buffer\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) #ifdef CONFIG_SYNTH_EVENTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) "\t Write into this file to define/undefine new synthetic events.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) tracing_readme_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) return simple_read_from_buffer(ubuf, cnt, ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) readme_msg, strlen(readme_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) static const struct file_operations tracing_readme_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) .open = tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) .read = tracing_readme_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) int pid = ++(*pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) return trace_find_tgid_ptr(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) int pid = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) return trace_find_tgid_ptr(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) static void saved_tgids_stop(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) static int saved_tgids_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) int *entry = (int *)v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) int pid = entry - tgid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) int tgid = *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) if (tgid == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) return SEQ_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) seq_printf(m, "%d %d\n", pid, tgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) static const struct seq_operations tracing_saved_tgids_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) .start = saved_tgids_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) .stop = saved_tgids_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) .next = saved_tgids_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) .show = saved_tgids_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) ret = tracing_check_open_get_tr(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) return seq_open(filp, &tracing_saved_tgids_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) static const struct file_operations tracing_saved_tgids_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) .open = tracing_saved_tgids_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) .release = seq_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) };
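/*
 * Example saved_tgids output (a sketch; the numbers are illustrative):
 * one "<pid> <tgid>" pair per line, for tasks seen while the record-tgid
 * option was enabled, per saved_tgids_show() above.
 *
 *   # cat saved_tgids
 *   1023 1023
 *   1024 1023
 */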
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) unsigned int *ptr = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) if (*pos || m->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) (*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) ptr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) void *v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) loff_t l = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) arch_spin_lock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) v = &savedcmd->map_cmdline_to_pid[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) while (l <= *pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) v = saved_cmdlines_next(m, v, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) if (!v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) static void saved_cmdlines_stop(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) arch_spin_unlock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) static int saved_cmdlines_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) char buf[TASK_COMM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) unsigned int *pid = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) __trace_find_cmdline(*pid, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) seq_printf(m, "%d %s\n", *pid, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) .start = saved_cmdlines_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) .next = saved_cmdlines_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) .stop = saved_cmdlines_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) .show = saved_cmdlines_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) ret = tracing_check_open_get_tr(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) static const struct file_operations tracing_saved_cmdlines_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) .open = tracing_saved_cmdlines_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) .release = seq_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) };
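/*
 * Example saved_cmdlines output (a sketch; the entries are illustrative):
 * each line maps a recorded pid to the comm last seen for it, per
 * saved_cmdlines_show() above.
 *
 *   # cat saved_cmdlines
 *   1 systemd
 *   523 kworker/0:1
 */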
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) arch_spin_lock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) arch_spin_unlock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) kfree(s->saved_cmdlines);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) kfree(s->map_cmdline_to_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) static int tracing_resize_saved_cmdlines(unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) struct saved_cmdlines_buffer *s, *savedcmd_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) s = kmalloc(sizeof(*s), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) if (allocate_cmdlines_buffer(val, s) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) arch_spin_lock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) savedcmd_temp = savedcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) savedcmd = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) arch_spin_unlock(&trace_cmdline_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) free_saved_cmdlines_buffer(savedcmd_temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) if (!val || val > PID_MAX_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) ret = tracing_resize_saved_cmdlines((unsigned int)val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) *ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) static const struct file_operations tracing_saved_cmdlines_size_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) .open = tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) .read = tracing_saved_cmdlines_size_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) .write = tracing_saved_cmdlines_size_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) };
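/*
 * Illustrative resize (a sketch): any value from 1 up to PID_MAX_DEFAULT
 * is accepted and swaps in a freshly allocated savedcmd buffer via
 * tracing_resize_saved_cmdlines().
 *
 *   # echo 4096 > saved_cmdlines_size
 */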
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) #ifdef CONFIG_TRACE_EVAL_MAP_FILE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) static union trace_eval_map_item *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) update_eval_map(union trace_eval_map_item *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) if (!ptr->map.eval_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) if (ptr->tail.next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) ptr = ptr->tail.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) /* Set ptr to the next real item (skip head) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) union trace_eval_map_item *ptr = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) * Paranoid! If ptr points to end, we don't want to increment past it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) * This really should never happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) (*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) ptr = update_eval_map(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) if (WARN_ON_ONCE(!ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) ptr = update_eval_map(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) static void *eval_map_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) union trace_eval_map_item *v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) loff_t l = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) mutex_lock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) v = trace_eval_maps;
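/* The first item of the map array is a "head" descriptor; step past it to the first real map entry. */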
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) if (v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) v++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584)
while (v && l < *pos)
v = eval_map_next(m, v, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) return v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) static void eval_map_stop(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) mutex_unlock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) static int eval_map_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) union trace_eval_map_item *ptr = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) seq_printf(m, "%s %ld (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) ptr->map.eval_string, ptr->map.eval_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) ptr->map.system);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) }
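/*
* For illustration, each map is printed as "<eval string> <value>
* (<system>)", one per line, so reading the "eval_map" file looks
* roughly like this (names and values here are only examples):
*
*   HI_SOFTIRQ 0 (irq)
*   TIMER_SOFTIRQ 1 (irq)
*/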
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) static const struct seq_operations tracing_eval_map_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) .start = eval_map_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) .next = eval_map_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) .stop = eval_map_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) .show = eval_map_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) static int tracing_eval_map_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) ret = tracing_check_open_get_tr(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) return seq_open(filp, &tracing_eval_map_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) static const struct file_operations tracing_eval_map_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) .open = tracing_eval_map_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) .release = seq_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) static inline union trace_eval_map_item *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) /* Return tail of array given the head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) return ptr + ptr->head.length + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) struct trace_eval_map **stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) struct trace_eval_map **map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) union trace_eval_map_item *map_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) union trace_eval_map_item *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) stop = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) /*
* The trace_eval_maps list contains the map plus a head and tail item,
* where the head holds the module and the length of the array, and the
* tail holds a pointer to the next list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) */
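/*
* For example, with len == 2 the allocation below is used as:
*
*   map_array[0]  head  (.mod, .length = 2)
*   map_array[1]  map   copied from *start[0]
*   map_array[2]  map   copied from *start[1]
*   map_array[3]  tail  (.next == NULL, cleared by the final memset)
*/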
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) if (!map_array) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) pr_warn("Unable to allocate trace eval mapping\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) mutex_lock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) if (!trace_eval_maps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) trace_eval_maps = map_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) ptr = trace_eval_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) ptr = trace_eval_jmp_to_tail(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) if (!ptr->tail.next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) break;
ptr = ptr->tail.next;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) ptr->tail.next = map_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) map_array->head.mod = mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) map_array->head.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) map_array++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) map_array->map = **map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) map_array++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) memset(map_array, 0, sizeof(*map_array));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) mutex_unlock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) static void trace_create_eval_file(struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) trace_create_file("eval_map", 0444, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) NULL, &tracing_eval_map_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) #else /* CONFIG_TRACE_EVAL_MAP_FILE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) static inline void trace_create_eval_file(struct dentry *d_tracer) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) static inline void trace_insert_eval_map_file(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) struct trace_eval_map **start, int len) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) static void trace_insert_eval_map(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) struct trace_eval_map **start, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) struct trace_eval_map **map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) if (len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) map = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) trace_event_eval_update(map, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) trace_insert_eval_map_file(mod, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) tracing_set_trace_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) char buf[MAX_TRACER_SIZE+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) r = sprintf(buf, "%s\n", tr->current_trace->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) int tracer_init(struct tracer *t, struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) tracing_reset_online_cpus(&tr->array_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) return t->init(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) for_each_tracing_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) per_cpu_ptr(buf->data, cpu)->entries = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) #ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) struct array_buffer *size_buf, int cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) int cpu, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) if (cpu_id == RING_BUFFER_ALL_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) ret = ring_buffer_resize(trace_buf->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) per_cpu_ptr(trace_buf->data, cpu)->entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) per_cpu_ptr(size_buf->data, cpu)->entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) ret = ring_buffer_resize(trace_buf->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) per_cpu_ptr(trace_buf->data, cpu_id)->entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) per_cpu_ptr(size_buf->data, cpu_id)->entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) #endif /* CONFIG_TRACER_MAX_TRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) static int __tracing_resize_ring_buffer(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) unsigned long size, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778)
/*
* If the kernel or user changes the size of the ring buffer,
* we use the size that was given, and we can forget about
* expanding it later.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) ring_buffer_expanded = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) /* May be called before buffers are initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) if (!tr->array_buffer.buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) !tr->current_trace->use_max_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) int r = resize_buffer_duplicate_size(&tr->array_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) &tr->array_buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) if (r < 0) {
/*
* AARGH! We are left with a max buffer of a
* different size!
* The max buffer is our "snapshot" buffer.
* When a tracer needs a snapshot (one of the
* latency tracers), it swaps the max buffer
* with the saved snapshot. We succeeded in
* updating the size of the main buffer, but failed to
* update the size of the max buffer. And when we tried
* to reset the main buffer to the original size, we
* failed there too. This is very unlikely to
* happen, but if it does, warn and disable all
* tracing.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) tracing_disabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) if (cpu == RING_BUFFER_ALL_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) set_buffer_entries(&tr->max_buffer, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) #endif /* CONFIG_TRACER_MAX_TRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) if (cpu == RING_BUFFER_ALL_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) set_buffer_entries(&tr->array_buffer, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) unsigned long size, int cpu_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) int ret = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) if (cpu_id != RING_BUFFER_ALL_CPUS) {
/* make sure this cpu is enabled in the mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) }
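/*
* This is the worker behind the "buffer_size_kb" files. A usage
* sketch (the tracefs mount point is assumed):
*
*   echo 4096 > /sys/kernel/tracing/buffer_size_kb
*   echo 4096 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
*
* The second form resizes a single CPU's buffer, which is the
* cpu_id != RING_BUFFER_ALL_CPUS path above.
*/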
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865)
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
*
* To save memory when tracing is never used on a system with it
* configured in, the ring buffers are set to a minimum size. Once
* a user starts to use the tracing facility, the buffers need to grow
* to their default size.
*
* This function is to be called when a tracer is about to be used.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) int tracing_update_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) if (!ring_buffer_expanded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) }
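/*
* Event-enable paths (e.g. writing to an "enable" file) call this
* before recording starts, so the boot-time minimal buffers grow to
* trace_buf_size exactly once; later calls are no-ops thanks to the
* ring_buffer_expanded flag.
*/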
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) struct trace_option_dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) * Used to clear out the tracer before deletion of an instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) * Must have trace_types_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) static void tracing_set_nop(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) if (tr->current_trace == &nop_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) tr->current_trace->enabled--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) if (tr->current_trace->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) tr->current_trace->reset(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) tr->current_trace = &nop_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) static void add_tracer_options(struct trace_array *tr, struct tracer *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) /* Only enable if the directory has been created already. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) if (!tr->dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) create_trace_option_files(tr, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) int tracing_set_tracer(struct trace_array *tr, const char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) struct tracer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) bool had_max_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) if (!ring_buffer_expanded) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) RING_BUFFER_ALL_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) for (t = trace_types; t; t = t->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) if (strcmp(t->name, buf) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) if (!t) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) if (t == tr->current_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) if (t->use_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) arch_spin_lock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) if (tr->cond_snapshot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) arch_spin_unlock(&tr->max_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) #endif
/* Some tracers won't work if set from the kernel command line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) if (system_state < SYSTEM_RUNNING && t->noboot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) t->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) /* Some tracers are only allowed for the top level buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) if (!trace_ok_for_array(t, tr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) /* If trace pipe files are being read, we can't change the tracer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) if (tr->trace_ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) trace_branch_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) tr->current_trace->enabled--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) if (tr->current_trace->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) tr->current_trace->reset(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) /* Current trace needs to be nop_trace before synchronize_rcu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) tr->current_trace = &nop_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) had_max_tr = tr->allocated_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) if (had_max_tr && !t->use_max_tr) {
/*
* We need to make sure that update_max_tr() sees that
* current_trace changed to nop_trace, to keep it from
* swapping the buffers after we resize them.
* update_max_tr() is called with interrupts disabled,
* so a synchronize_rcu() is sufficient.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) free_snapshot(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) if (t->use_max_tr && !had_max_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) ret = tracing_alloc_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) if (t->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) ret = tracer_init(t, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) tr->current_trace = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) tr->current_trace->enabled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) trace_branch_enable(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) tracing_set_trace_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) char buf[MAX_TRACER_SIZE+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) size_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) ret = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) if (cnt > MAX_TRACER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) cnt = MAX_TRACER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) if (copy_from_user(buf, ubuf, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) buf[cnt] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) /* strip ending whitespace. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) buf[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) err = tracing_set_tracer(tr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) *ppos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) }
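/*
* This is the write handler behind the "current_tracer" file. A
* quick usage sketch (available tracers depend on the kernel config):
*
*   cat /sys/kernel/tracing/available_tracers
*   echo function > /sys/kernel/tracing/current_tracer
*   echo nop > /sys/kernel/tracing/current_tracer
*/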
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) r = snprintf(buf, sizeof(buf), "%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) if (r > sizeof(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) r = sizeof(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) *ptr = val * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) }
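/*
* Note the asymmetry in units: the file interface is in microseconds
* while the stored value is in nanoseconds. Writing "100" stores
* 100000 through *ptr, and tracing_nsecs_read() above converts it
* back down (printing -1 for the "unset" sentinel).
*/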
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) tracing_thresh_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) tracing_thresh_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) if (tr->current_trace->update_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) ret = tr->current_trace->update_thresh(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) ret = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) }
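/*
* Usage sketch (the value is taken in microseconds, 0 clears the
* threshold):
*
*   echo 100 > /sys/kernel/tracing/tracing_thresh
*
* Tracers that support a threshold (e.g. function_graph) then record
* only what exceeds it; the update_thresh() callback above lets the
* current tracer react to the new value.
*/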
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) tracing_max_lat_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) tracing_max_lat_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) static int tracing_open_pipe(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) struct trace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) /* create a buffer to store the information to pass to userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) iter = kzalloc(sizeof(*iter), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) if (!iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) __trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) trace_seq_init(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) iter->trace = tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) /* trace pipe does not show start of buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) cpumask_setall(iter->started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) iter->iter_flags |= TRACE_FILE_LAT_FMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) /* Output in nanoseconds only if we are using a clock in nanoseconds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) if (trace_clocks[tr->clock_id].in_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) iter->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) iter->array_buffer = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) iter->cpu_file = tracing_get_cpu(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) mutex_init(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) filp->private_data = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) if (iter->trace->pipe_open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) iter->trace->pipe_open(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) nonseekable_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) tr->trace_ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) kfree(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) __trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) static int tracing_release_pipe(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) struct trace_iterator *iter = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) tr->trace_ref--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) if (iter->trace->pipe_close)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) iter->trace->pipe_close(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) free_cpumask_var(iter->started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) mutex_destroy(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) kfree(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) static __poll_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228)
/* Iterators are static; they should be filled or empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) if (trace_buffer_iter(iter, iter->cpu_file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) return EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) if (tr->trace_flags & TRACE_ITER_BLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) * Always select as readable when in blocking mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) return EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) filp, poll_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) static __poll_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) tracing_poll_pipe(struct file *filp, poll_table *poll_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) struct trace_iterator *iter = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) return trace_poll(iter, filp, poll_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) /* Must be called with iter->mutex held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) static int tracing_wait_pipe(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) struct trace_iterator *iter = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) while (trace_empty(iter)) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) * We block until we read something and tracing is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) * We still block if tracing is disabled, but we have never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) * read anything. This allows a user to cat this file, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) * then enable tracing. But after we have read something,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) * we give an EOF when tracing is again disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) * iter->pos will be 0 if we haven't read anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) if (!tracer_tracing_is_on(iter->tr) && iter->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) mutex_unlock(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) ret = wait_on_pipe(iter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) mutex_lock(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) }
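/*
* Net effect for a reader of trace_pipe: with tracing disabled and
* nothing yet read, the read blocks (so "cat trace_pipe" can be
* started first and tracing enabled afterwards); once something has
* been read, disabling tracing makes the next read return EOF.
*/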
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) * Consumer reader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) tracing_read_pipe(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) struct trace_iterator *iter = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) ssize_t sret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297)
/*
* Avoid more than one consumer on a single file descriptor.
* This is just a matter of trace coherency; the ring buffer itself
* is protected.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) mutex_lock(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) /* return any leftover data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) if (sret != -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) trace_seq_init(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) if (iter->trace->read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) if (sret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) waitagain:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) sret = tracing_wait_pipe(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) if (sret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) /* stop when tracing is finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) if (trace_empty(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) sret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) if (cnt >= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) cnt = PAGE_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) /* reset all but tr, trace, and overruns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) memset(&iter->seq, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) sizeof(struct trace_iterator) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) offsetof(struct trace_iterator, seq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) cpumask_clear(iter->started);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) trace_seq_init(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) iter->pos = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) trace_event_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) trace_access_lock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) while (trace_find_next_entry_inc(iter) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) enum print_line_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) int save_len = iter->seq.seq.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) ret = print_trace_line(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) if (ret == TRACE_TYPE_PARTIAL_LINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) /* don't print partial lines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) iter->seq.seq.len = save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) if (ret != TRACE_TYPE_NO_CONSUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) trace_consume(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) if (trace_seq_used(&iter->seq) >= cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357)
/*
* Setting the full flag means we reached the trace_seq buffer
* size, and we should have left via the partial-line condition
* above. If we get here, one of the trace_seq_*() functions is
* not being used properly.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) iter->ent->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) trace_access_unlock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) trace_event_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) /* Now copy what we have to the user */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) trace_seq_init(&iter->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373)
/*
* If there was nothing to send to the user, in spite of consuming
* trace entries, go back to wait for more entries.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) if (sret == -EBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) goto waitagain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) mutex_unlock(&iter->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) return sret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) }
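/*
* Unlike the "trace" file, reads here consume the entries they
* return (trace_consume() above), so concurrent readers of
* trace_pipe each see a disjoint subset of the events, e.g.:
*
*   cat /sys/kernel/tracing/trace_pipe > /tmp/trace.log &
*/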
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) __free_page(spd->pages[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) static size_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) size_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) int save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) /* Seq buffer is page-sized, exactly what we need. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) save_len = iter->seq.seq.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) ret = print_trace_line(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) if (trace_seq_has_overflowed(&iter->seq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) iter->seq.seq.len = save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409)
/*
* This should not be hit, because TRACE_TYPE_PARTIAL_LINE
* should only be returned if iter->seq overflowed, which
* is handled above. But check it anyway to be safe.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) if (ret == TRACE_TYPE_PARTIAL_LINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) iter->seq.seq.len = save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) count = trace_seq_used(&iter->seq) - save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) if (rem < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) iter->seq.seq.len = save_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) if (ret != TRACE_TYPE_NO_CONSUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) trace_consume(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) rem -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) if (!trace_find_next_entry_inc(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) iter->ent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) return rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) }
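
/*
 * Note: tracing_fill_pipe_page() above formats as many complete trace
 * lines as fit in the page-sized seq buffer, consuming entries as it
 * goes, and returns how much of the caller's byte budget ("rem") is
 * left. A return of 0 tells the splice loop below to stop filling
 * pages; a line that would overflow the page is rolled back (the saved
 * seq length is restored) and left for the next page.
 */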
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439)
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &default_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
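
/*
 * Userspace sketch (illustrative only, not part of the kernel): the
 * splice path above lets a reader move formatted trace data into a pipe
 * a full page at a time. Error handling is omitted and the path assumes
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *	int tfd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	int lfd = open("trace.log", O_WRONLY | O_CREAT, 0644);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	for (;;) {
 *		ssize_t n = splice(tfd, NULL, pfd[1], NULL, 65536, 0);
 *		if (n <= 0)
 *			break;
 *		splice(pfd[0], NULL, lfd, NULL, n, 0);
 *	}
 */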
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525)
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are the same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from the first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
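
/*
 * These two handlers back the buffer_size_kb file (and its per-CPU
 * variants under per_cpu/cpuN/). Sizes are reported and accepted in
 * kilobytes. Illustrative shell usage, assuming tracefs is mounted at
 * /sys/kernel/tracing (sample numbers are only an example):
 *
 *	# cat /sys/kernel/tracing/buffer_size_kb
 *	7 (expanded: 1408)
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * "X" is printed for the top-level file when the per-CPU buffers are
 * not all the same size.
 */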
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601)
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this function
	 * only exists so that writing to the file with "echo" does not
	 * return an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
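
/*
 * Note: all the work for the free_buffer file happens on release(), not
 * on write(). When the last opener closes the file, the ring buffer is
 * shrunk back down (a resize request of 0), and if the stop-on-free
 * trace option is set, tracing is turned off first so nothing keeps
 * writing into the buffer while it is being freed. Typical (illustrative)
 * usage:
 *
 *	# echo > /sys/kernel/tracing/free_buffer
 */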
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655)
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_STR "<faulted>"
#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	if (static_branch_unlikely(&trace_marker_exports_enabled))
		ftrace_exports(event, TRACE_EXPORT_MARKER);
	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}
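
/*
 * Userspace sketch (illustrative): writing to trace_marker injects a
 * TRACE_PRINT event into the ring buffer, interleaved with the rest of
 * the trace output. Applications typically keep the fd open and write
 * short strings to annotate their own activity:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "frame start", 11);
 *	...
 *	write(fd, "frame end", 9);
 *
 * A faulting copy from userspace does not discard the event: the buffer
 * records the "<faulted>" placeholder instead and the write returns
 * -EFAULT.
 */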
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734)
/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct raw_data_entry *entry;
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->array_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
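
/*
 * Userspace sketch (illustrative): trace_marker_raw takes binary data
 * whose first sizeof(unsigned int) bytes are interpreted as a tag id
 * (copied into entry->id), with the remainder as opaque payload:
 *
 *	struct { unsigned int id; char data[8]; } msg = {
 *		.id = 42, .data = "payload"
 *	};
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &msg, sizeof(msg));
 *
 * On a userspace fault the id is recorded as -1 with the "<faulted>"
 * string as payload.
 */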
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797)
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
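
/*
 * Illustrative shell usage for the trace_clock file these helpers back:
 * reading lists the available clocks with the current one in brackets,
 * and writing a clock name switches to it (which also clears the buffer,
 * since timestamps from different clocks are not comparable). The exact
 * clock list depends on the kernel, e.g.:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo global > /sys/kernel/tracing/trace_clock
 */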
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846)
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	mutex_lock(&trace_types_lock);

	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
		seq_puts(m, "delta [absolute]\n");
	else
		seq_puts(m, "[delta] absolute\n");

	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (abs && tr->time_stamp_abs_ref++)
		goto out;

	if (!abs) {
		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
			ret = -EINVAL;
			goto out;
		}

		if (--tr->time_stamp_abs_ref)
			goto out;
	}

	ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
#endif
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
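
/*
 * Note: tracing_set_time_stamp_abs() is refcounted. Several in-kernel
 * users (hist triggers, for instance) may require absolute timestamps
 * at the same time: the ring buffer is switched to absolute mode on the
 * first request and back to delta mode only when the last user drops
 * its reference. Disabling with no outstanding reference is a caller
 * bug and trips the WARN_ON_ONCE() above.
 */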
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953)
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->array_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	if (ret)
		goto out;

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->array_buffer, iter->cpu_file);
		else
			ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			break;
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id(), NULL);
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
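
/*
 * Snapshot write semantics, as implemented above:
 *
 *	echo 0 > snapshot : frees the snapshot buffer if one is allocated
 *	echo 1 > snapshot : allocates the buffer if needed, then swaps it
 *			    with the live buffer (taking the snapshot)
 *	echo 2 > snapshot : (any other value) clears the snapshot buffer
 *			    without freeing it
 *
 * The per-CPU snapshot files accept 1 and 2 but not 0, and per-CPU swaps
 * are only allowed when the ring buffer supports them
 * (CONFIG_RING_BUFFER_ALLOW_SWAP).
 */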
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085)
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	/* The following checks for tracefs lockdown */
	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.array_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

#define TRACING_LOG_ERRS_MAX	8
#define TRACING_LOG_LOC_MAX	128

#define CMD_PREFIX "  Command: "

struct err_info {
	const char	**errs;	/* ptr to loc-specific array of err strings */
	u8		type;	/* index into errs -> specific err string */
	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
	u64		ts;
};

struct tracing_log_err {
	struct list_head	list;
	struct err_info		info;
	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
	struct tracing_log_err *err;

	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
		err = kzalloc(sizeof(*err), GFP_KERNEL);
		if (!err)
			err = ERR_PTR(-ENOMEM);
		else
			tr->n_err_log_entries++;

		return err;
	}

	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
	list_del(&err->list);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) * err_pos - find the position of a string within a command for error careting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) * @cmd: The tracing command that caused the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) * @str: The string to position the caret at within @cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) * Finds the position of the first occurrence of @str within @cmd. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) * return value can be passed to tracing_log_err() for caret placement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) * within @cmd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) * Returns the index within @cmd of the first occurrence of @str or 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) * if @str was not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) unsigned int err_pos(char *cmd, const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) char *found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) if (WARN_ON(!strlen(cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) found = strstr(cmd, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) return found - cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304)
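/*
 * A minimal, hypothetical sketch of err_pos() in a caller (the command
 * and token below are invented for illustration):
 *
 *	char *cmd = "field=no_such_field";
 *	unsigned int pos = err_pos(cmd, "no_such_field");
 *
 * Here pos == 6, which puts the caret under the offending token; had
 * the token been absent, err_pos() would have returned 0.
 */
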
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) * tracing_log_err - write an error to the tracing error log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) * @tr: The associated trace array for the error (NULL for top level array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) * @loc: A string describing where the error occurred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) * @cmd: The tracing command that caused the error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) * @errs: The array of loc-specific static error strings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) * @type: The index into errs[], which produces the specific static err string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) * @pos: The position the caret should be placed in the cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) * Writes an error into tracing/error_log of the form:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) * <loc>: error: <text>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) * Command: <cmd>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) * ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) * tracing/error_log is a small log file containing the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) * unless there has been a tracing error; the error log can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) * cleared, and its memory freed, by writing the empty string in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) * truncation mode to it, i.e. echo > tracing/error_log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) * NOTE: the @errs array along with the @type param are used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) * produce a static error string - this string is not copied and saved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) * when the error is logged - only a pointer to it is saved. See
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) * existing callers for examples of how static strings are typically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) * defined for use with tracing_log_err().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) void tracing_log_err(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) const char *loc, const char *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) const char **errs, u8 type, u8 pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) struct tracing_log_err *err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) mutex_lock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) err = get_tracing_log_err(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) if (PTR_ERR(err) == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) mutex_unlock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) err->info.errs = errs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) err->info.type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) err->info.pos = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) err->info.ts = local_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) list_add_tail(&err->list, &tr->err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) mutex_unlock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359)
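/*
 * A sketch of the caller pattern described above; the names are
 * illustrative, not taken from an existing tracer:
 *
 *	static const char *my_tracer_errs[] = {
 *		"Duplicate field name",
 *		"Field not found",
 *	};
 *	enum { MY_ERR_DUP_FIELD, MY_ERR_NO_FIELD };
 *
 *	tracing_log_err(tr, "my_tracer", cmd, my_tracer_errs,
 *			MY_ERR_NO_FIELD, err_pos(cmd, field_name));
 *
 * Because only the errs[type] pointer is saved, the string array must
 * have static storage duration.
 */
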
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) static void clear_tracing_err_log(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) struct tracing_log_err *err, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) mutex_lock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) list_for_each_entry_safe(err, next, &tr->err_log, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) list_del(&err->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) kfree(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) tr->n_err_log_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) mutex_unlock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) mutex_lock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) return seq_list_start(&tr->err_log, *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) return seq_list_next(v, &tr->err_log, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) mutex_unlock(&tracing_err_log_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) u8 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) seq_putc(m, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) for (i = 0; i < pos; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) seq_putc(m, ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) seq_puts(m, "^\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) static int tracing_err_log_seq_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) struct tracing_log_err *err = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) const char *err_text = err->info.errs[err->info.type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) u64 sec = err->info.ts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) u32 nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) nsec = do_div(sec, NSEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) err->loc, err_text);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) seq_printf(m, "%s", err->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) tracing_err_log_show_pos(m, err->info.pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424)
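/*
 * Rendered through the formats above, the hypothetical entry from the
 * earlier sketch would read:
 *
 *	[  136.518175] my_tracer: error: Field not found
 *	  Command: field=no_such_field
 *	                 ^
 */
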
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) static const struct seq_operations tracing_err_log_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) .start = tracing_err_log_seq_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) .next = tracing_err_log_seq_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) .stop = tracing_err_log_seq_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) .show = tracing_err_log_seq_show
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) static int tracing_err_log_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) /* If this file was opened for write, then erase contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) clear_tracing_err_log(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) ret = seq_open(file, &tracing_err_log_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) m->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456)
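/*
 * Writes are accepted but discarded: the handler exists only so the
 * file can be opened for writing, which lets "echo > error_log" (an
 * O_TRUNC open, handled above) clear the log.
 */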
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) static ssize_t tracing_err_log_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) const char __user *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) static int tracing_err_log_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) if (file->f_mode & FMODE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) static const struct file_operations tracing_err_log_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) .open = tracing_err_log_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) .write = tracing_err_log_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) .release = tracing_err_log_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) static int tracing_buffers_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) struct ftrace_buffer_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) info = kvzalloc(sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) info->iter.tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) info->iter.cpu_file = tracing_get_cpu(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) info->iter.trace = tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) info->iter.array_buffer = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) info->spare = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) /* Force reading ring buffer for first read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) info->read = (unsigned int)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) filp->private_data = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) tr->trace_ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) ret = nonseekable_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) static __poll_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) tracing_buffers_poll(struct file *filp, poll_table *poll_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) struct ftrace_buffer_info *info = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) struct trace_iterator *iter = &info->iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) return trace_poll(iter, filp, poll_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) tracing_buffers_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) struct ftrace_buffer_info *info = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) struct trace_iterator *iter = &info->iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) ssize_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) if (iter->snapshot && iter->tr->current_trace->use_max_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) if (!info->spare) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) if (IS_ERR(info->spare)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) ret = PTR_ERR(info->spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) info->spare = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) info->spare_cpu = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) if (!info->spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) /* Do we have previous read data to read? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) if (info->read < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) goto read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) trace_access_lock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) ret = ring_buffer_read_page(iter->array_buffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) &info->spare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) iter->cpu_file, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) trace_access_unlock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) if (trace_empty(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) if ((filp->f_flags & O_NONBLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) ret = wait_on_pipe(iter, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) info->read = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) size = PAGE_SIZE - info->read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) if (size > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) size = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) ret = copy_to_user(ubuf, info->spare + info->read, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) if (ret == size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) size -= ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) *ppos += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601) info->read += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605)
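/*
 * The read path above conventionally backs per_cpu/cpu<N>/trace_pipe_raw
 * (an assumption here -- the file itself is created elsewhere). A
 * minimal user-space consumer, assuming 4K pages, would read whole
 * pages at a time:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	char page[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		consume_raw_page(page, n);
 *
 * (consume_raw_page() is a hypothetical helper.) A read blocks in
 * wait_on_pipe() unless O_NONBLOCK is set, and a partially consumed
 * page is picked up again on the next read via info->read.
 */
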
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) static int tracing_buffers_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) struct ftrace_buffer_info *info = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) struct trace_iterator *iter = &info->iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) iter->tr->trace_ref--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) __trace_array_put(iter->tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) if (info->spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) ring_buffer_free_read_page(iter->array_buffer->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) info->spare_cpu, info->spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) kvfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) struct buffer_ref {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) void *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) refcount_t refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) static void buffer_ref_release(struct buffer_ref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) if (!refcount_dec_and_test(&ref->refcount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) struct pipe_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) struct buffer_ref *ref = (struct buffer_ref *)buf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) buffer_ref_release(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) buf->private = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) struct pipe_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) struct buffer_ref *ref = (struct buffer_ref *)buf->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655)
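/* Refuse another reference once the count is implausibly large (overflow hardening) */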
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) if (refcount_read(&ref->refcount) > INT_MAX/2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) refcount_inc(&ref->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) /* Pipe buffer operations for a tracing ring-buffer page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) static const struct pipe_buf_operations buffer_pipe_buf_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) .release = buffer_pipe_buf_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) .get = buffer_pipe_buf_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) * Callback from splice_to_pipe(); used to release any pages left in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) * the spd if we errored out while filling the pipe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) struct buffer_ref *ref =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) (struct buffer_ref *)spd->partial[i].private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) buffer_ref_release(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) spd->partial[i].private = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) tracing_buffers_splice_read(struct file *file, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) struct pipe_inode_info *pipe, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) struct ftrace_buffer_info *info = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) struct trace_iterator *iter = &info->iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) struct partial_page partial_def[PIPE_DEF_BUFFERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) struct page *pages_def[PIPE_DEF_BUFFERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) struct splice_pipe_desc spd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) .pages = pages_def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) .partial = partial_def,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) .nr_pages_max = PIPE_DEF_BUFFERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) .ops = &buffer_pipe_buf_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) .spd_release = buffer_spd_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) struct buffer_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) int entries, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) if (iter->snapshot && iter->tr->current_trace->use_max_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) if (*ppos & (PAGE_SIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) if (len & (PAGE_SIZE - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) if (len < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) len &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) if (splice_grow_spd(pipe, &spd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) trace_access_lock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) ref = kzalloc(sizeof(*ref), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728) if (!ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) refcount_set(&ref->refcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) ref->buffer = iter->array_buffer->buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) if (IS_ERR(ref->page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) ret = PTR_ERR(ref->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) ref->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) ref->cpu = iter->cpu_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) r = ring_buffer_read_page(ref->buffer, &ref->page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) len, iter->cpu_file, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) ring_buffer_free_read_page(ref->buffer, ref->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) ref->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) page = virt_to_page(ref->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) spd.pages[i] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) spd.partial[i].len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) spd.partial[i].offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) spd.partial[i].private = (unsigned long)ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) spd.nr_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) *ppos += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) trace_access_unlock(iter->cpu_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) spd.nr_pages = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) /* did we read anything? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) if (!spd.nr_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) ret = wait_on_pipe(iter, iter->tr->buffer_percent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) ret = splice_to_pipe(pipe, &spd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) splice_shrink_spd(&spd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790)
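/*
 * Illustrative user-space counterpart of the splice path (file names
 * are examples; the length stays page aligned, as required above):
 *
 *	int raw = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	int out = open("trace.dat", O_WRONLY | O_CREAT, 0644);
 *	int p[2];
 *	ssize_t n;
 *
 *	pipe(p);
 *	while ((n = splice(raw, NULL, p[1], NULL, 4096, 0)) > 0)
 *		splice(p[0], NULL, out, NULL, n, 0);
 *
 * Each page moves into the pipe by reference (the buffer_ref above),
 * so the data never passes through a user-space copy.
 */
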
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) static const struct file_operations tracing_buffers_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) .open = tracing_buffers_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) .read = tracing_buffers_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) .poll = tracing_buffers_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) .release = tracing_buffers_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) .splice_read = tracing_buffers_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) .llseek = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) tracing_stats_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) struct inode *inode = file_inode(filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) struct array_buffer *trace_buf = &tr->array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) int cpu = tracing_get_cpu(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) struct trace_seq *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) unsigned long cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) unsigned long long t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) unsigned long usec_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) s = kmalloc(sizeof(*s), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) if (!s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) trace_seq_init(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) trace_seq_printf(s, "entries: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) trace_seq_printf(s, "overrun: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) trace_seq_printf(s, "commit overrun: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) trace_seq_printf(s, "bytes: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) if (trace_clocks[tr->clock_id].in_ns) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) /* local or global for trace_clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) usec_rem = do_div(t, USEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) t, usec_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839) usec_rem = do_div(t, USEC_PER_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) /* counter or tsc mode for trace_clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) trace_seq_printf(s, "oldest event ts: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) trace_seq_printf(s, "now ts: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) ring_buffer_time_stamp(trace_buf->buffer, cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) trace_seq_printf(s, "dropped events: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854) trace_seq_printf(s, "read events: %ld\n", cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) count = simple_read_from_buffer(ubuf, count, ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) s->buffer, trace_seq_used(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) kfree(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863)
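/*
 * Sample of the resulting output, with invented figures (this handler
 * conventionally backs the per-CPU "stats" file):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2296.688264
 *	now ts:  2310.421479
 *	dropped events: 0
 *	read events: 42
 */
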
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) static const struct file_operations tracing_stats_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) .open = tracing_open_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) .read = tracing_stats_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868) .release = tracing_release_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) tracing_read_dyn_info(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) /* 256 bytes is plenty to hold the three counters below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) buf = kmalloc(256, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) ftrace_update_tot_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) ftrace_number_of_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) ftrace_number_of_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895)
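/*
 * With the format above, the file this handler is wired to (assumed to
 * be dyn_ftrace_total_info) reads back as, e.g. (figures invented):
 *
 *	45613 pages:245 groups: 38
 */
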
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) static const struct file_operations tracing_dyn_info_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) .open = tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) .read = tracing_read_dyn_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) tracing_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) long *count = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) if (mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) if (*count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) (*count)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) tracing_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) struct ftrace_probe_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) long *count = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) seq_printf(m, "%ps:", (void *)ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) seq_puts(m, "snapshot");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) if (mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) seq_printf(m, ":count=%ld\n", *count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) seq_puts(m, ":unlimited\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) unsigned long ip, void *init_data, void **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960) struct ftrace_func_mapper *mapper = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) if (!mapper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) mapper = allocate_ftrace_func_mapper();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964) if (!mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966) *data = mapper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) return ftrace_func_mapper_add_ip(mapper, ip, init_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) unsigned long ip, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) if (!ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) if (!mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) free_ftrace_func_mapper(mapper, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985) ftrace_func_mapper_remove_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988) static struct ftrace_probe_ops snapshot_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989) .func = ftrace_snapshot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990) .print = ftrace_snapshot_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7993) static struct ftrace_probe_ops snapshot_count_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) .func = ftrace_count_snapshot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) .print = ftrace_snapshot_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) .init = ftrace_snapshot_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997) .free = ftrace_snapshot_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001) ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002) char *glob, char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) struct ftrace_probe_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) void *count = (void *)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) char *number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) /* hash funcs only work with set_ftrace_filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013) if (!enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) if (glob[0] == '!')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) return unregister_ftrace_function_probe_func(glob+1, tr, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) if (!param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) goto out_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) number = strsep(&param, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) if (!strlen(number))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) goto out_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) * We use the callback data field (which is a pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) * as our counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) ret = kstrtoul(number, 0, (unsigned long *)&count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) out_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038) ret = tracing_alloc_snapshot_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) ret = register_ftrace_function_probe(glob, tr, ops, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) return ret < 0 ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047)
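/*
 * The "snapshot" command registered below is driven through
 * set_ftrace_filter; for example (the function name is illustrative):
 *
 *	echo 'do_sys_open:snapshot' > set_ftrace_filter
 *	echo 'do_sys_open:snapshot:3' > set_ftrace_filter
 *	echo '!do_sys_open:snapshot' > set_ftrace_filter
 *
 * The first form snapshots on every hit of the function, the second
 * only for the next three hits (the count parsed from @param above),
 * and the '!' form removes the probe again.
 */
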
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) static struct ftrace_func_command ftrace_snapshot_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049) .name = "snapshot",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050) .func = ftrace_trace_snapshot_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053) static __init int register_snapshot_cmd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055) return register_ftrace_command(&ftrace_snapshot_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) static inline __init int register_snapshot_cmd(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059) #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) static struct dentry *tracing_get_dentry(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063) if (WARN_ON(!tr->dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) /* Top directory uses NULL as the parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067) if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070) /* All sub buffers have a descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) return tr->dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076) struct dentry *d_tracer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078) if (tr->percpu_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8079) return tr->percpu_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8081) d_tracer = tracing_get_dentry(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8082) if (IS_ERR(d_tracer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8083) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8085) tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8087) MEM_FAIL(!tr->percpu_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8088) "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090) return tr->percpu_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093) static struct dentry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095) void *data, long cpu, const struct file_operations *fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) if (ret) /* See tracing_get_cpu() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100) d_inode(ret)->i_cdev = (void *)(cpu + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105) tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108) struct dentry *d_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109) char cpu_dir[30]; /* 30 characters should be more than enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) if (!d_percpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114) snprintf(cpu_dir, 30, "cpu%ld", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115) d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) if (!d_cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117) pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121) /* per cpu trace_pipe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) trace_create_cpu_file("trace_pipe", 0444, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123) tr, cpu, &tracing_pipe_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125) /* per cpu trace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) trace_create_cpu_file("trace", 0644, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8127) tr, cpu, &tracing_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129) trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130) tr, cpu, &tracing_buffers_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) trace_create_cpu_file("stats", 0444, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133) tr, cpu, &tracing_stats_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136) tr, cpu, &tracing_entries_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139) trace_create_cpu_file("snapshot", 0644, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140) tr, cpu, &snapshot_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8142) trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8143) tr, cpu, &snapshot_raw_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145) }
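
/*
 * For each CPU this yields a per-instance layout of:
 *
 *	per_cpu/cpu<N>/trace_pipe
 *	per_cpu/cpu<N>/trace
 *	per_cpu/cpu<N>/trace_pipe_raw
 *	per_cpu/cpu<N>/stats
 *	per_cpu/cpu<N>/buffer_size_kb
 *	per_cpu/cpu<N>/snapshot		(CONFIG_TRACER_SNAPSHOT only)
 *	per_cpu/cpu<N>/snapshot_raw	(CONFIG_TRACER_SNAPSHOT only)
 */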
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) #ifdef CONFIG_FTRACE_SELFTEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8148) /* Let selftest have access to static functions in this file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8149) #include "trace_selftest.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153) trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8156) struct trace_option_dentry *topt = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159) if (topt->flags->val & topt->opt->bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160) buf = "1\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162) buf = "0\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168) trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171) struct trace_option_dentry *topt = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179) if (val != 0 && val != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182) if (!!(topt->flags->val & topt->opt->bit) != val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184) ret = __set_tracer_option(topt->tr, topt->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) topt->opt, !val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191) *ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194) }
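
/*
 * Hedged usage sketch: each tracer-specific option is exposed as a
 * boolean file under the instance's options/ directory, e.g.
 *
 *	echo 1 > options/<option>	# set
 *	echo 0 > options/<option>	# clear
 *
 * Only "0" and "1" are accepted; anything else is rejected with
 * -EINVAL by the write handler above.
 */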
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) static const struct file_operations trace_options_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) .open = tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) .read = trace_options_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200) .write = trace_options_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) * In order to pass both the trace_array descriptor and the index of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206) * the flag that a trace option file represents, the trace_array has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207) * a character array trace_flags_index[] that holds, for each flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) * the index of its bit: index[0] == 0, index[1] == 1, etc. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) * address of the element for a given flag is what gets passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) * that flag option file's read/write callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) * In order to extract both the index and the trace_array descriptor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213) * get_tr_index() uses the following algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215) * idx = *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) * This works because each element stores its own index (remember,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) * index[1] == 1), so dereferencing the pointer yields the index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) * Subtracting that index from the ptr then gets us back to the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) * of the index array itself:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223) * ptr - idx == &index[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225) * From there a simple container_of() gets us to the enclosing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) * trace_array descriptor. (A standalone sketch follows get_tr_index() below.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228) static void get_tr_index(void *data, struct trace_array **ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229) unsigned int *pindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231) *pindex = *(unsigned char *)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233) *ptr = container_of(data - *pindex, struct trace_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) trace_flags_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8235) }
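
/*
 * A minimal standalone sketch of the same recovery trick, with
 * hypothetical names and for illustration only:
 *
 *	struct foo {
 *		long whatever;
 *		unsigned char index[8];		// index[i] == i
 *	};
 *
 *	// Given only p == &f->index[i] for some unknown i:
 *	unsigned char i = *p;			// each element is its own index
 *	struct foo *f = container_of((void *)(p - i), struct foo, index);
 */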
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238) trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241) void *tr_index = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246) get_tr_index(tr_index, &tr, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) if (tr->trace_flags & (1 << index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) buf = "1\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251) buf = "0\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8253) return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257) trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8260) void *tr_index = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) unsigned int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266) get_tr_index(tr_index, &tr, &index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) if (val != 0 && val != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) ret = set_tracer_flag(tr, 1 << index, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284) *ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289) static const struct file_operations trace_options_core_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) .open = tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291) .read = trace_options_core_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) .write = trace_options_core_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) .llseek = generic_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) struct dentry *trace_create_file(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297) umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298) struct dentry *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) const struct file_operations *fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) struct dentry *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304) ret = tracefs_create_file(name, mode, parent, data, fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) pr_warn("Could not create tracefs '%s' entry\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312) static struct dentry *trace_options_init_dentry(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314) struct dentry *d_tracer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) if (tr->options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317) return tr->options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8319) d_tracer = tracing_get_dentry(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320) if (IS_ERR(d_tracer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) tr->options = tracefs_create_dir("options", d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324) if (!tr->options) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) pr_warn("Could not create tracefs directory 'options'\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329) return tr->options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) create_trace_option_file(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334) struct trace_option_dentry *topt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335) struct tracer_flags *flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336) struct tracer_opt *opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338) struct dentry *t_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) t_options = trace_options_init_dentry(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341) if (!t_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344) topt->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345) topt->opt = opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) topt->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348) topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349) &trace_options_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8354) create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8356) struct trace_option_dentry *topts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357) struct trace_options *tr_topts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) struct tracer_flags *flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359) struct tracer_opt *opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363) if (!tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366) flags = tracer->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368) if (!flags || !flags->opts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) * If this is an instance, only create flags for tracers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373) * the instance may have.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) if (!trace_ok_for_array(tracer, tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) for (i = 0; i < tr->nr_topts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) /* Make sure there are no duplicate flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) opts = flags->opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386) for (cnt = 0; opts[cnt].name; cnt++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389) topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390) if (!topts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393) tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8395) if (!tr_topts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) kfree(topts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) tr->topts = tr_topts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) tr->topts[tr->nr_topts].tracer = tracer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402) tr->topts[tr->nr_topts].topts = topts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403) tr->nr_topts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405) for (cnt = 0; opts[cnt].name; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) create_trace_option_file(tr, &topts[cnt], flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407) &opts[cnt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408) MEM_FAIL(topts[cnt].entry == NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) "Failed to create trace option: %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) opts[cnt].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412) }
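
/*
 * For reference, flags->opts is a NULL-name-terminated array. A
 * hypothetical tracer would define something like:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-opt-a, 0x1) },
 *		{ TRACER_OPT(my-opt-b, 0x2) },
 *		{ }	// .name == NULL ends both counting loops above
 *	};
 */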
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) static struct dentry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) create_trace_option_core_file(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) const char *option, long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418) struct dentry *t_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) t_options = trace_options_init_dentry(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) if (!t_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8424) return trace_create_file(option, 0644, t_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425) (void *)&tr->trace_flags_index[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426) &trace_options_core_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429) static void create_trace_options_dir(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8431) struct dentry *t_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) bool top_level = tr == &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435) t_options = trace_options_init_dentry(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) if (!t_options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8437) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8438)
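	/* Instances do not get the options reserved for the top-level buffer */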
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8439) for (i = 0; trace_options[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) if (top_level ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441) !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) create_trace_option_core_file(tr, trace_options[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447) rb_simple_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8451) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8452) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454) r = tracer_tracing_is_on(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) r = sprintf(buf, "%d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) rb_simple_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465) struct trace_buffer *buffer = tr->array_buffer.buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473) if (buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8475) if (!!val == tracer_tracing_is_on(tr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8476) val = 0; /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) } else if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) tracer_tracing_on(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479) if (tr->current_trace->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480) tr->current_trace->start(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482) tracer_tracing_off(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483) if (tr->current_trace->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484) tr->current_trace->stop(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8489) (*ppos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) }
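
/*
 * These handlers back the per-instance "tracing_on" control file.
 * Hedged usage sketch (paths assume tracefs at /sys/kernel/tracing):
 *
 *	echo 0 > tracing_on	# stop writing to the ring buffer
 *	echo 1 > tracing_on	# resume
 *
 * Writing the value that is already in effect takes the "do nothing"
 * branch above and only advances the file position.
 */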
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494) static const struct file_operations rb_simple_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495) .open = tracing_open_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) .read = rb_simple_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) .write = rb_simple_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8498) .release = tracing_release_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8499) .llseek = default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503) buffer_percent_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507) char buf[64];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) r = tr->buffer_percent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) r = sprintf(buf, "%d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513) return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) buffer_percent_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518) size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520) struct trace_array *tr = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528) if (val > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532) val = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534) tr->buffer_percent = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) (*ppos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) }
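
/*
 * Hedged note: buffer_percent sets how full the ring buffer must be
 * before a blocked reader (e.g. of trace_pipe_raw) is woken. In this
 * version writes of 0 are bumped to 1 by the handler above, so some
 * data is always required before a wakeup.
 */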
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541) static const struct file_operations buffer_percent_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8542) .open = tracing_open_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8543) .read = buffer_percent_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8544) .write = buffer_percent_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) .release = tracing_release_generic_tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546) .llseek = default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549) static struct dentry *trace_instance_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552) init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555) allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557) enum ring_buffer_flags rb_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559) rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) buf->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) buf->buffer = ring_buffer_alloc(size, rb_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) if (!buf->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567) buf->data = alloc_percpu(struct trace_array_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568) if (!buf->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569) ring_buffer_free(buf->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570) buf->buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) /* Allocate the first page for all buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575) set_buffer_entries(&tr->array_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576) ring_buffer_size(tr->array_buffer.buffer, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581) static int allocate_trace_buffers(struct trace_array *tr, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585) ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590) ret = allocate_trace_buffer(tr, &tr->max_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591) allocate_snapshot ? size : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593) ring_buffer_free(tr->array_buffer.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594) tr->array_buffer.buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595) free_percpu(tr->array_buffer.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) tr->array_buffer.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) }
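	/*
	 * Unless boot-time snapshot allocation was requested, the max
	 * buffer above starts out at the minimum size and is only grown
	 * to match the main buffer when a snapshot is first armed.
	 */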
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599) tr->allocated_snapshot = allocate_snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602) * Only the top level trace array gets its snapshot allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) * from the kernel command line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605) allocate_snapshot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8608) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611) static void free_trace_buffer(struct array_buffer *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8613) if (buf->buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8614) ring_buffer_free(buf->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615) buf->buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) free_percpu(buf->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617) buf->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) static void free_trace_buffers(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626) free_trace_buffer(&tr->array_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) free_trace_buffer(&tr->max_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8633) static void init_trace_flags_index(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8635) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) /* Used by the trace options files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) tr->trace_flags_index[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642) static void __update_tracer_options(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644) struct tracer *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) for (t = trace_types; t; t = t->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647) add_tracer_options(tr, t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) static void update_tracer_options(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653) __update_tracer_options(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657) /* Must have trace_types_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) struct trace_array *trace_array_find(const char *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) struct trace_array *tr, *found = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663) if (tr->name && strcmp(tr->name, instance) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) found = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672) struct trace_array *trace_array_find_get(const char *instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677) tr = trace_array_find(instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) if (tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679) tr->ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) return tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685) static int trace_array_create_dir(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8689) tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) if (!tr->dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) ret = event_trace_add_tracer(tr->dir, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695) tracefs_remove(tr->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) init_tracer_tracefs(tr, tr->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700) __update_tracer_options(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8702) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705) static struct trace_array *trace_array_create(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711) tr = kzalloc(sizeof(*tr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712) if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) tr->name = kstrdup(name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) if (!tr->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717) goto out_free_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720) goto out_free_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724) cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726) raw_spin_lock_init(&tr->start_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728) tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) tr->current_trace = &nop_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732) INIT_LIST_HEAD(&tr->systems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) INIT_LIST_HEAD(&tr->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734) INIT_LIST_HEAD(&tr->hist_vars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) INIT_LIST_HEAD(&tr->err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) if (allocate_trace_buffers(tr, trace_buf_size) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738) goto out_free_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) if (ftrace_allocate_ftrace_ops(tr) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741) goto out_free_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) ftrace_init_trace_array(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8745) init_trace_flags_index(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747) if (trace_instance_dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) ret = trace_array_create_dir(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) goto out_free_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752) __trace_early_add_events(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) list_add(&tr->list, &ftrace_trace_arrays);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) tr->ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758) return tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) out_free_tr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761) ftrace_free_ftrace_ops(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) free_trace_buffers(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) free_cpumask_var(tr->tracing_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) kfree(tr->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765) kfree(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770) static int instance_mkdir(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) if (trace_array_find(name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782) tr = trace_array_create(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) ret = PTR_ERR_OR_ZERO(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) * trace_array_get_by_name - Create/Lookup a trace array, given its name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794) * @name: The name of the trace array to be looked up/created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8796) * Returns a pointer to the trace array with the given name, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8797) * NULL if it cannot be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8799) * NOTE: This function increments the reference counter associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) * trace array returned. This makes sure it cannot be freed while in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) * Use trace_array_put() once the trace array is no longer needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802) * If the trace_array is to be freed, trace_array_destroy() needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803) * be called after the trace_array_put(), or simply let user space delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) * it from the tracefs instances directory. But until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) * trace_array_put() is called, user space cannot delete it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808) struct trace_array *trace_array_get_by_name(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) if (tr->name && strcmp(tr->name, name) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820) tr = trace_array_create(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822) if (IS_ERR(tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823) tr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825) if (tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826) tr->ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8830) return tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) EXPORT_SYMBOL_GPL(trace_array_get_by_name);
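
/*
 * Minimal in-kernel usage sketch (hypothetical module code):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);		// drop the reference taken above
 *	trace_array_destroy(tr);	// only if the instance should go away
 */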
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834) static int __remove_instance(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838) /* A newly created trace array has a reference count of 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) list_del(&tr->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844) /* Disable all the flags that were enabled coming in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8846) if ((1 << i) & ZEROED_TRACE_FLAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8847) set_tracer_flag(tr, 1 << i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8850) tracing_set_nop(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8851) clear_ftrace_function_probes(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8852) event_trace_del_tracer(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8853) ftrace_clear_pids(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8854) ftrace_destroy_function_files(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8855) tracefs_remove(tr->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8856) free_trace_buffers(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8858) for (i = 0; i < tr->nr_topts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8859) kfree(tr->topts[i].topts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8861) kfree(tr->topts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8863) free_cpumask_var(tr->tracing_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8864) kfree(tr->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8865) kfree(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8870) int trace_array_destroy(struct trace_array *this_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8872) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8873) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8875) if (!this_tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8876) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8878) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8879) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8881) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8883) /* Make sure the trace array exists before destroying it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8884) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8885) if (tr == this_tr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8886) ret = __remove_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8891) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8892) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) EXPORT_SYMBOL_GPL(trace_array_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) static int instance_rmdir(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8900) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8901) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907) tr = trace_array_find(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908) if (tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) ret = __remove_instance(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917) static __init void create_trace_instances(struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922) instance_mkdir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) instance_rmdir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924) if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) mutex_lock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930) list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931) if (!tr->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933) if (MEM_FAIL(trace_array_create_dir(tr) < 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) "Failed to create instance directory\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8938) mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8939) mutex_unlock(&event_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8940) }
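
/*
 * From userspace the hooks registered above map to plain directory
 * operations under tracefs, e.g.:
 *
 *   mkdir /sys/kernel/tracing/instances/foo   -> instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   -> instance_rmdir("foo")
 *
 * Each instance gets its own ring buffer and its own copies of the
 * control files created by init_tracer_tracefs() below.
 */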
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8942) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8943) init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8945) struct trace_event_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8946) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8948) trace_create_file("available_tracers", 0444, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8949) tr, &show_traces_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8951) trace_create_file("current_tracer", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8952) tr, &set_tracer_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8954) trace_create_file("tracing_cpumask", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8955) tr, &tracing_cpumask_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8957) trace_create_file("trace_options", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8958) tr, &tracing_iter_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8960) trace_create_file("trace", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8961) tr, &tracing_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8963) trace_create_file("trace_pipe", 0444, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8964) tr, &tracing_pipe_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8966) trace_create_file("buffer_size_kb", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8967) tr, &tracing_entries_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8969) trace_create_file("buffer_total_size_kb", 0444, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8970) tr, &tracing_total_entries_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8972) trace_create_file("free_buffer", 0200, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8973) tr, &tracing_free_buffer_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8975) trace_create_file("trace_marker", 0220, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8976) tr, &tracing_mark_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8978) file = __find_event_file(tr, "ftrace", "print");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8979) if (file && file->dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8980) trace_create_file("trigger", 0644, file->dir, file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8981) &event_trigger_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8982) tr->trace_marker_file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8984) trace_create_file("trace_marker_raw", 0220, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8985) tr, &tracing_mark_raw_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8987) trace_create_file("trace_clock", 0644, d_tracer, tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8988) &trace_clock_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8990) trace_create_file("tracing_on", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8991) tr, &rb_simple_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8993) trace_create_file("timestamp_mode", 0444, d_tracer, tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8994) &trace_time_stamp_mode_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8996) tr->buffer_percent = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8998) trace_create_file("buffer_percent", 0444, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8999) tr, &buffer_percent_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9001) create_trace_options_dir(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9003) #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9004) trace_create_maxlat_file(tr, d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9005) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9007) if (ftrace_create_function_files(tr, d_tracer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9008) MEM_FAIL(1, "Could not allocate function filter files");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9010) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9011) trace_create_file("snapshot", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9012) tr, &snapshot_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9015) trace_create_file("error_log", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9016) tr, &tracing_err_log_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9018) for_each_tracing_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9019) tracing_init_tracefs_percpu(tr, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9021) ftrace_init_tracefs(tr, d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9022) }
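
/*
 * A typical userspace session against the files created above
 * (illustrative):
 *
 *   cd /sys/kernel/tracing
 *   echo function > current_tracer
 *   echo 1 > tracing_on
 *   cat trace_pipe
 *   echo 0 > tracing_on
 */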
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9024) #ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9025) static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9027) struct vfsmount *mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9028) struct file_system_type *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9031) * To maintain backward compatibility for tools that mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9032) * debugfs to get to the tracing facility, tracefs is automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9033) * mounted to the debugfs/tracing directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9035) type = get_fs_type("tracefs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9036) if (!type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9037) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9038) mnt = vfs_submount(mntpt, type, "tracefs", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9039) put_filesystem(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9040) if (IS_ERR(mnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9041) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9042) mntget(mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9044) return mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9046) #endif
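
/*
 * Note: the automount above only emulates the legacy debugfs path; tracefs
 * can also be mounted explicitly:
 *
 *   mount -t tracefs nodev /sys/kernel/tracing
 */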
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9048) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9049) * tracing_init_dentry - initialize top level trace array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9050) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9051) * This is called when creating files or directories in the tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9052) * directory. It is called via fs_initcall() by any of the boot up code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9053) * Returns 0 on success or a negative errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9055) int tracing_init_dentry(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9057) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9059) if (security_locked_down(LOCKDOWN_TRACEFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9060) pr_warn("Tracing disabled due to lockdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9061) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9064) /* The top level trace array uses NULL as parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9065) if (tr->dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9066) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9068) if (WARN_ON(!tracefs_initialized()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9069) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9071) #ifndef CONFIG_TRACEFS_DISABLE_AUTOMOUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9073) * As there may still be users that expect the tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9074) * files to exist in debugfs/tracing, we must automount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9075) * the tracefs file system there, so older tools still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9076) * work with the newer kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9078) tr->dir = debugfs_create_automount("tracing", NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9079) trace_automount, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9080) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9081) tr->dir = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9082) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9087) extern struct trace_eval_map *__start_ftrace_eval_maps[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9088) extern struct trace_eval_map *__stop_ftrace_eval_maps[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9090) static void __init trace_eval_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9092) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9094) len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9095) trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9096) }
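
/*
 * Illustrative sketch (not compiled): the eval maps inserted above are
 * emitted by TRACE_DEFINE_ENUM() in trace event headers, so that enum
 * names used in event print formats can be resolved to their numeric
 * values. The enum below is a made-up example.
 */
#if 0
enum my_flags { MY_FLAG_A = 1, MY_FLAG_B = 2 };	/* hypothetical enum */

TRACE_DEFINE_ENUM(MY_FLAG_A);
TRACE_DEFINE_ENUM(MY_FLAG_B);
#endif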
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9098) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9099) static void trace_module_add_evals(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9101) if (!mod->num_trace_evals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9102) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9105) * Modules with bad taint do not have events created; do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9106) * bother with their enums (eval maps) either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9108) if (trace_module_has_bad_taint(mod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9109) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9111) trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9114) #ifdef CONFIG_TRACE_EVAL_MAP_FILE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9115) static void trace_module_remove_evals(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9117) union trace_eval_map_item *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9118) union trace_eval_map_item **last = &trace_eval_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9120) if (!mod->num_trace_evals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9121) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9123) mutex_lock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9125) map = trace_eval_maps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9126)
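	/*
	 * The maps are stored as contiguous arrays: a "head" item that
	 * records the owning module, the map entries themselves, and a
	 * "tail" item whose ->next links to the next module's array.
	 * Walk the heads until this module's array is found.
	 */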
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9127) while (map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9128) if (map->head.mod == mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9130) map = trace_eval_jmp_to_tail(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9131) last = &map->tail.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9132) map = map->tail.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9134) if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9135) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9137) *last = trace_eval_jmp_to_tail(map)->tail.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9138) kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9139) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9140) mutex_unlock(&trace_eval_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9142) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9143) static inline void trace_module_remove_evals(struct module *mod) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9144) #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9146) static int trace_module_notify(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9147) unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9149) struct module *mod = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9151) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9152) case MODULE_STATE_COMING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9153) trace_module_add_evals(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9155) case MODULE_STATE_GOING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9156) trace_module_remove_evals(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9157) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9160) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9163) static struct notifier_block trace_module_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9164) .notifier_call = trace_module_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9165) .priority = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9166) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9167) #endif /* CONFIG_MODULES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9169) static __init int tracer_init_tracefs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9171) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9173) trace_access_lock_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9175) ret = tracing_init_dentry();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9176) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9179) event_trace_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9181) init_tracer_tracefs(&global_trace, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9182) ftrace_init_tracefs_toplevel(&global_trace, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9184) trace_create_file("tracing_thresh", 0644, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9185) &global_trace, &tracing_thresh_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9187) trace_create_file("README", 0444, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9188) NULL, &tracing_readme_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9190) trace_create_file("saved_cmdlines", 0444, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9191) NULL, &tracing_saved_cmdlines_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9193) trace_create_file("saved_cmdlines_size", 0644, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9194) NULL, &tracing_saved_cmdlines_size_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9196) trace_create_file("saved_tgids", 0444, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9197) NULL, &tracing_saved_tgids_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9199) trace_eval_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9201) trace_create_eval_file(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9203) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9204) register_module_notifier(&trace_module_nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9205) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9207) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9208) trace_create_file("dyn_ftrace_total_info", 0444, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9209) NULL, &tracing_dyn_info_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9210) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9212) create_trace_instances(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9214) update_tracer_options(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9216) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9219) static int trace_panic_handler(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9220) unsigned long event, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9222) bool ftrace_check = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9224) trace_android_vh_ftrace_oops_enter(&ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9226) if (ftrace_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9227) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9229) if (ftrace_dump_on_oops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9230) ftrace_dump(ftrace_dump_on_oops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9232) trace_android_vh_ftrace_oops_exit(&ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9233) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9236) static struct notifier_block trace_panic_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9237) .notifier_call = trace_panic_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9238) .next = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9239) .priority = 150 /* priority: INT_MAX >= x >= 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9240) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9242) static int trace_die_handler(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9243) unsigned long val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9244) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9246) bool ftrace_check = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9248) trace_android_vh_ftrace_oops_enter(&ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9250) if (ftrace_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9251) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9253) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9254) case DIE_OOPS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9255) if (ftrace_dump_on_oops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9256) ftrace_dump(ftrace_dump_on_oops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9257) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9258) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9262) trace_android_vh_ftrace_oops_exit(&ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9263) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9266) static struct notifier_block trace_die_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9267) .notifier_call = trace_die_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9268) .priority = 200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9269) };
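
/*
 * The notifiers above only dump when ftrace_dump_on_oops is set, e.g. via
 * the "ftrace_dump_on_oops" kernel command line parameter or at runtime:
 *
 *   sysctl kernel.ftrace_dump_on_oops=1
 */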
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9271) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9272) * printk is limited to a max of 1024 bytes; we really don't need it that big.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9273) * Nothing should be printing 1000 characters anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9275) #define TRACE_MAX_PRINT 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9278) * Define here KERN_TRACE so that we have one place to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9279) * it if we decide to change what log level the ftrace dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9280) * should be at.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9282) #define KERN_TRACE KERN_EMERG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9284) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9285) trace_printk_seq(struct trace_seq *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9287) bool dump_printk = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9289) /* Probably should print a warning here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9290) if (s->seq.len >= TRACE_MAX_PRINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9291) s->seq.len = TRACE_MAX_PRINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9294) * More paranoid code. Although the buffer size is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9295) * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9296) * an extra layer of protection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9298) if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9299) s->seq.len = s->seq.size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9301) /* Should already be NUL-terminated, but we are paranoid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9302) s->buffer[s->seq.len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9304) trace_android_vh_ftrace_dump_buffer(s, &dump_printk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9305) if (dump_printk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9306) printk(KERN_TRACE "%s", s->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9308) trace_seq_init(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9311) void trace_init_global_iter(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9313) iter->tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9314) iter->trace = iter->tr->current_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9315) iter->cpu_file = RING_BUFFER_ALL_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9316) iter->array_buffer = &global_trace.array_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9318) if (iter->trace && iter->trace->open)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9319) iter->trace->open(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9321) /* Annotate start of buffers if we had overruns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9322) if (ring_buffer_overruns(iter->array_buffer->buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9323) iter->iter_flags |= TRACE_FILE_ANNOTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9325) /* Output in nanoseconds only if we are using a clock in nanoseconds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9326) if (trace_clocks[iter->tr->clock_id].in_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9327) iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9330) void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9332) /* use static because iter can be a bit big for the stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9333) static struct trace_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9334) static atomic_t dump_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9335) struct trace_array *tr = &global_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9336) unsigned int old_userobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9337) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9338) int cnt = 0, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9339) bool ftrace_check = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9340) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9342) /* Only allow one dump user at a time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9343) if (atomic_inc_return(&dump_running) != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9344) atomic_dec(&dump_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9349) * Always turn off tracing when we dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9350) * We don't need to show trace output of what happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9351) * between multiple crashes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9353) * If the user does a sysrq-z, then they can re-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9354) * tracing with echo 1 > tracing_on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9356) tracing_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9358) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9359) printk_nmi_direct_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9361) /* Simulate the iterator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9362) trace_init_global_iter(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9363) /* Cannot use kmalloc for iter.temp: we may be in NMI/panic context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9364) iter.temp = static_temp_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9365) iter.temp_size = STATIC_TEMP_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9367) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9368) atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9369) size = ring_buffer_size(iter.array_buffer->buffer, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9370) trace_android_vh_ftrace_size_check(size, &ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9373) old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9375) /* don't look at user memory in panic mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9376) tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9378) if (ftrace_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9379) goto out_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9381) switch (oops_dump_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9382) case DUMP_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9383) iter.cpu_file = RING_BUFFER_ALL_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9385) case DUMP_ORIG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9386) iter.cpu_file = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9388) case DUMP_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9389) goto out_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9390) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9391) printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9392) iter.cpu_file = RING_BUFFER_ALL_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9395) printk(KERN_TRACE "Dumping ftrace buffer:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9397) /* Did function tracer already get disabled? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9398) if (ftrace_is_dead()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9399) printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9400) printk("# MAY BE MISSING FUNCTION EVENTS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9404) * We need to stop all tracing on all CPUs to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9405) * the next buffer. This is a bit expensive, but is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9406) * not done often. We print all that we can read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9407) * and then release the locks again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9410) while (!trace_empty(&iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9411) ftrace_check = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9413) if (!cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9414) printk(KERN_TRACE "---------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9416) cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9418) trace_iterator_reset(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9419) trace_android_vh_ftrace_format_check(&ftrace_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9420) if (ftrace_check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9421) iter.iter_flags |= TRACE_FILE_LAT_FMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9423) if (trace_find_next_entry_inc(&iter) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9424) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9426) ret = print_trace_line(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9427) if (ret != TRACE_TYPE_NO_CONSUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9428) trace_consume(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9430) touch_nmi_watchdog();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9432) trace_printk_seq(&iter.seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9435) if (!cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9436) printk(KERN_TRACE " (ftrace buffer empty)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9437) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9438) printk(KERN_TRACE "---------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9440) out_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9441) tr->trace_flags |= old_userobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9443) for_each_tracing_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9444) atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9446) atomic_dec(&dump_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9447) printk_nmi_direct_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9448) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9450) EXPORT_SYMBOL_GPL(ftrace_dump);
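
/*
 * Illustrative sketch (not compiled): ftrace_dump() may also be called
 * directly from debug code; DUMP_ORIG restricts the dump to the CPU that
 * called it. The same dump can be triggered at runtime with sysrq-z.
 * The function below is a hypothetical debug hook.
 */
#if 0
static void example_debug_hook(void)
{
	/* Dump only this CPU's buffer to the console. */
	ftrace_dump(DUMP_ORIG);
}
#endif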
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9452) int trace_run_command(const char *buf, int (*createfn)(int, char **))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9454) char **argv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9455) int argc, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9457) argc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9458) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9459) argv = argv_split(GFP_KERNEL, buf, &argc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9460) if (!argv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9461) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9463) if (argc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9464) ret = createfn(argc, argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9466) argv_free(argv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9468) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9469) }
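
/*
 * Illustrative sketch (not compiled): a createfn callback receives the
 * argv_split() result. For the input "p:myprobe do_sys_open", argc is 2,
 * argv[0] is "p:myprobe" and argv[1] is "do_sys_open". The callback name
 * below is a made-up example.
 */
#if 0
static int example_createfn(int argc, char **argv)
{
	if (argc < 2)
		return -EINVAL;
	pr_info("command '%s' with %d argument(s)\n", argv[0], argc - 1);
	return 0;
}

/* ret = trace_run_command("p:myprobe do_sys_open", example_createfn); */
#endif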
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9471) #define WRITE_BUFSIZE 4096
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9473) ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9474) size_t count, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9475) int (*createfn)(int, char **))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9477) char *kbuf, *buf, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9478) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9479) size_t done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9480) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9482) kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9483) if (!kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9484) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9486) while (done < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9487) size = count - done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9489) if (size >= WRITE_BUFSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9490) size = WRITE_BUFSIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9492) if (copy_from_user(kbuf, buffer + done, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9493) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9494) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9496) kbuf[size] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9497) buf = kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9498) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9499) tmp = strchr(buf, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9500) if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9501) *tmp = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9502) size = tmp - buf + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9503) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9504) size = strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9505) if (done + size < count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9506) if (buf != kbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9508) /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9509) pr_warn("Line length is too long: Should be less than %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9510) WRITE_BUFSIZE - 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9511) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9512) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9515) done += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9517) /* Remove comments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9518) tmp = strchr(buf, '#');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9520) if (tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9521) *tmp = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9523) ret = trace_run_command(buf, createfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9524) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9525) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9526) buf += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9528) } while (done < count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9530) ret = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9532) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9533) kfree(kbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9535) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9536) }
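
/*
 * Example of the parsing above: a single write of
 *
 *   "p:probe1 do_sys_open\n# a comment\np:probe2 do_sys_openat2\n"
 *
 * is split at each '\n'; the '#' line is truncated to an empty command
 * (argv_split() then yields argc == 0, so createfn is skipped), and
 * createfn() runs once for each of the two probe definitions.
 */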
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9538) __init static int tracer_alloc_buffers(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9540) int ring_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9541) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9544) if (security_locked_down(LOCKDOWN_TRACEFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9545) pr_warn("Tracing disabled due to lockdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9546) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9550) * Make sure we don't accidentally add more trace options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9551) * than we have bits for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9553) BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9555) if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9556) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9558) if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9559) goto out_free_buffer_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9561) /* Only allocate trace_printk buffers if a trace_printk exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9562) if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9563) /* Must be called before global_trace.buffer is allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9564) trace_printk_init_buffers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9566) /* To save memory, keep the ring buffer size at its minimum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9567) if (ring_buffer_expanded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9568) ring_buf_size = trace_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9569) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9570) ring_buf_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9572) cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9573) cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9575) raw_spin_lock_init(&global_trace.start_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9578) * The prepare callback allocates some memory for the ring buffer. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9579) * don't free the buffer if the CPU goes down. If we were to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9580) * the buffer, then the user would lose any trace that was in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9581) * buffer. The memory will be removed once the "instance" is removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9583) ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9584) "trace/RB:prepare", trace_rb_cpu_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9585) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9586) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9587) goto out_free_cpumask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9588) /* Used for event triggers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9589) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9590) temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9591) if (!temp_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9592) goto out_rm_hp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9594) if (trace_create_savedcmd() < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9595) goto out_free_temp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9597) /* TODO: make the number of buffers hotpluggable with CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9598) if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9599) MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9600) goto out_free_savedcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9603) if (global_trace.buffer_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9604) tracing_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9606) if (trace_boot_clock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9607) ret = tracing_set_clock(&global_trace, trace_boot_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9608) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9609) pr_warn("Trace clock %s not defined, going back to default\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9610) trace_boot_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9614) * register_tracer() might reference current_trace, so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9615) * needs to be set before we register anything. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9616) * just a bootstrap of current_trace anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9618) global_trace.current_trace = &nop_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9620) global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9622) ftrace_init_global_array_ops(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9624) init_trace_flags_index(&global_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9626) register_tracer(&nop_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9628) /* Function tracing may start here (via kernel command line) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9629) init_function_trace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9631) /* All seems OK, enable tracing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9632) tracing_disabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9634) atomic_notifier_chain_register(&panic_notifier_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9635) &trace_panic_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9637) register_die_notifier(&trace_die_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9639) global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9641) INIT_LIST_HEAD(&global_trace.systems);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9642) INIT_LIST_HEAD(&global_trace.events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9643) INIT_LIST_HEAD(&global_trace.hist_vars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9644) INIT_LIST_HEAD(&global_trace.err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9645) list_add(&global_trace.list, &ftrace_trace_arrays);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9647) apply_trace_boot_options();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9649) register_snapshot_cmd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9651) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9653) out_free_savedcmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9654) free_saved_cmdlines_buffer(savedcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9655) out_free_temp_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9656) ring_buffer_free(temp_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9657) out_rm_hp_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9658) cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9659) out_free_cpumask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9660) free_cpumask_var(global_trace.tracing_cpumask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9661) out_free_buffer_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9662) free_cpumask_var(tracing_buffer_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9663) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9664) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9667) void __init early_trace_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9669) if (tracepoint_printk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9670) tracepoint_print_iter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9671) kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9672) if (MEM_FAIL(!tracepoint_print_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9673) "Failed to allocate trace iterator\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9674) tracepoint_printk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9675) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9676) static_key_enable(&tracepoint_printk_key.key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9678) tracer_alloc_buffers();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9681) void __init trace_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9683) trace_event_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9686) __init static int clear_boot_tracer(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9689) * The default bootup tracer name is stored in an init section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9690) * buffer. This function is called via late_initcall_sync(); if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9691) * the boot tracer was never found by then, clear the pointer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9692) * prevent a later registration from accessing the init-section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9693) * buffer that is about to be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9695) if (!default_bootup_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9698) printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9699) default_bootup_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9700) default_bootup_tracer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9705) fs_initcall(tracer_init_tracefs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9706) late_initcall_sync(clear_boot_tracer);
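
/*
 * Initialization ordering: early_trace_init() and trace_init() are called
 * directly from start_kernel(), tracer_init_tracefs() runs as an
 * fs_initcall() once the VFS is up, and clear_boot_tracer() runs at
 * late_initcall_sync() time to drop the init-section bootup tracer name.
 */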
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9708) #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9709) __init static int tracing_set_default_clock(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9711) /* sched_clock_stable() is determined in late_initcall */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9712) if (!trace_boot_clock && !sched_clock_stable()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9713) if (security_locked_down(LOCKDOWN_TRACEFS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9714) pr_warn("Can not set tracing clock due to lockdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9715) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9718) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9719) "Unstable clock detected, switching default tracing clock to \"global\"\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9720) "If you want to keep using the local clock, then add:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9721) " \"trace_clock=local\"\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9722) "on the kernel command line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9723) tracing_set_clock(&global_trace, "global");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9726) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9728) late_initcall_sync(tracing_set_default_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9729) #endif