// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_fn
#define __field_fn(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry	ent;			\
		tstruct						\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
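
/*
 * Illustrative sketch (simplified from trace_entries.h): with the
 * definitions above, an invocation such as
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field_fn(unsigned long, ip)
 *			__field_fn(unsigned long, parent_ip)
 *		),
 *		...)
 *
 * expands roughly to:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */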

#include "trace_entries.h"

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...) ({			\
	static bool __section(".data.once") __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
	}							\
	unlikely(__ret_warn_once);				\
})
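
/*
 * Example use of MEM_FAIL() (an illustrative sketch, not code from
 * this file): report an allocation failure once and take the error
 * path:
 *
 *	buf->data = alloc_percpu(struct trace_array_cpu);
 *	if (MEM_FAIL(!buf->data, "Failed to allocate per-CPU trace data\n"))
 *		return -ENOMEM;
 */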

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- a preemption-point reschedule is pending
 *  NMI			- inside an NMI handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};

enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off the types in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}
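
/*
 * Illustrative sketch of how the two helpers above fit together when
 * a pid-filtering list is cleared (unregister_probes() is a
 * hypothetical stand-in for the actual teardown):
 *
 *	if (!pid_type_enabled(type, pid_list, no_pid_list))
 *		return;
 *
 *	... clear the list(s) named by @type ...
 *
 *	if (!still_need_pid_events(type, pid_list, no_pid_list))
 *		unregister_probes(tr);
 */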

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function. That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array. Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_snapshot_cond_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_snapshot_cond_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held. The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken. Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
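
/*
 * Illustrative sketch of an update() implementation (struct my_cond
 * and its fields are hypothetical, not defined by this header):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *c = cond_data;
 *
 *		return c->seen_value > c->threshold;
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_cond_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_cond_data);
 */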

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the array_buffer.
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the array_buffer and the buffers are reset for
	 * the array_buffer so the tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE block.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			time_stamp_abs_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type macro is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item, and "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 * A usage sketch follows the macro below.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
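
/*
 * Usage sketch (patterned after the output routines): verify and
 * downcast the generic entry before touching type-specific fields:
 *
 *	struct trace_entry *entry = iter->ent;
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 */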

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit mask that sets its value in the flags
 * val of struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
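
/*
 * Illustrative sketch: a tracer with one private option ("myopt" and
 * MY_OPT_BIT are hypothetical) defines its opts and flags like this:
 *
 *	#define MY_OPT_BIT	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(myopt, MY_OPT_BIT) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * The empty entry terminates the array; .val holds the initial state
 * of all option bits.
 */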


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits,
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq context, because we want to trace a particular
	 * function that was called in irq context while irq tracing is
	 * off. Since this state can only be modified by current, we can
	 * reuse trace_recursion for it.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
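
/*
 * Illustrative sketch (mirroring the graph tracer's use of the depth
 * macros): when a function in set_graph_function starts a graph
 * trace, record the depth at which the bit was set so the return
 * side can match it:
 *
 *	trace_recursion_set(TRACE_GRAPH_BIT);
 *	trace_recursion_set_depth(trace->depth);
 *
 * and later:
 *
 *	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
 *	    trace->depth == trace_recursion_depth())
 *		trace_recursion_clear(TRACE_GRAPH_BIT);
 */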

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

#define TRACE_LIST_START	TRACE_INTERNAL_BIT

#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	TRACE_CTX_TRANSITION,
};

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = TRACE_CTX_NMI;

		else if (in_irq())
			bit = TRACE_CTX_IRQ;
		else
			bit = TRACE_CTX_SOFTIRQ;
	} else
		bit = TRACE_CTX_NORMAL;

	return bit;
}


static __always_inline int trace_test_and_set_recursion(int start)
{
	unsigned int val = current->trace_recursion;
	int bit;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * It could be that preempt_count has not been updated during
		 * a switch between contexts. Allow for a single recursion.
		 */
		bit = start + TRACE_CTX_TRANSITION;
		if (trace_recursion_test(bit))
			return -1;
		trace_recursion_set(bit);
		barrier();
		return bit;
	}

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
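
/*
 * Illustrative sketch of the guard pattern the two helpers above
 * implement (do_trace_work() is a hypothetical callback body):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
 *	if (bit < 0)
 *		return;
 *
 *	do_trace_work();
 *
 *	trace_clear_recursion(bit);
 *
 * A negative return means this context is already inside the tracer,
 * so the callback bails out instead of recursing.
 */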
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static inline struct ring_buffer_iter *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) trace_buffer_iter(struct trace_iterator *iter, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) int tracer_init(struct tracer *t, struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) int tracing_is_enabled(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) void tracing_reset_online_cpus(struct array_buffer *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) void tracing_reset_current(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) void tracing_reset_all_online_cpus(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) int tracing_open_generic(struct inode *inode, struct file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) int tracing_open_generic_tr(struct inode *inode, struct file *filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) bool tracing_is_disabled(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) bool tracer_tracing_is_on(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) void tracer_tracing_on(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) void tracer_tracing_off(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct dentry *trace_create_file(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct dentry *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) const struct file_operations *fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) int tracing_init_dentry(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct ring_buffer_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) struct ring_buffer_event *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) trace_buffer_lock_reserve(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) unsigned long len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct trace_array_cpu *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int *ent_cpu, u64 *ent_ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct ring_buffer_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int trace_empty(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) void *trace_find_next_entry_inc(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) void trace_init_global_iter(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) void tracing_iter_reset(struct trace_iterator *iter, int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned long trace_total_entries(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) void trace_function(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) unsigned long flags, int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) void trace_graph_function(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) unsigned long flags, int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) void trace_latency_header(struct seq_file *m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) void trace_default_header(struct seq_file *m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) void trace_graph_return(struct ftrace_graph_ret *trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) int trace_graph_entry(struct ftrace_graph_ent *trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) void set_graph_array(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) void tracing_start_cmdline_record(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) void tracing_stop_cmdline_record(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) void tracing_start_tgid_record(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) void tracing_stop_tgid_record(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) int register_tracer(struct tracer *type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int is_tracing_stopped(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) extern cpumask_var_t __read_mostly tracing_buffer_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) #define for_each_tracing_cpu(cpu) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) for_each_cpu(cpu, tracing_buffer_mask)
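/*
 * An illustrative sketch (not taken from a specific caller): walk every
 * CPU that has a tracing buffer and sum up its entries.
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		total += trace_total_entries_cpu(tr, cpu);
 */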
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) extern unsigned long nsecs_to_usecs(unsigned long nsecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) extern unsigned long tracing_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* PID filtering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) extern int pid_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) pid_t search_pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct trace_pid_list *filtered_no_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct task_struct *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct task_struct *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct task_struct *task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int trace_pid_show(struct seq_file *m, void *v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) void trace_free_pid_list(struct trace_pid_list *pid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int trace_pid_write(struct trace_pid_list *filtered_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct trace_pid_list **new_pid_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) const char __user *ubuf, size_t cnt);
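/*
 * Typically (an illustrative sketch, not a fixed rule) a write to one
 * of the "set_*_pid" tracefs files funnels into trace_pid_write(),
 * which parses the user-supplied PID list into a new trace_pid_list
 * that the caller then publishes with rcu_assign_pointer().
 */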
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #ifdef CONFIG_TRACER_MAX_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) void *cond_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) void update_max_tr_single(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct task_struct *tsk, int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) #endif /* CONFIG_TRACER_MAX_TRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) defined(CONFIG_FSNOTIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) void latency_fsnotify(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static inline void latency_fsnotify(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int skip, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #endif /* CONFIG_STACKTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) extern u64 ftrace_now(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) extern void trace_find_cmdline(int pid, char comm[]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) extern int trace_find_tgid(int pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) extern unsigned long ftrace_update_tot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) extern unsigned long ftrace_number_of_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) extern unsigned long ftrace_number_of_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) void ftrace_init_trace_array(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline void ftrace_init_trace_array(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) extern int DYN_FTRACE_TEST_NAME(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) extern int DYN_FTRACE_TEST_NAME2(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) extern bool ring_buffer_expanded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) extern bool tracing_selftest_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) #ifdef CONFIG_FTRACE_STARTUP_TEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) extern void __init disable_tracing_selftest(const char *reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) extern int trace_selftest_startup_function(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) extern int trace_selftest_startup_function_graph(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) extern int trace_selftest_startup_irqsoff(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) extern int trace_selftest_startup_preemptoff(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) extern int trace_selftest_startup_wakeup(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) extern int trace_selftest_startup_nop(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) extern int trace_selftest_startup_branch(struct tracer *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct trace_array *tr);
/*
 * Tracer data references selftest functions that are only used during
 * boot up. These can be __init functions, so when selftests are
 * enabled the tracer data must be allowed to reference __init sections
 * (hence __refdata below).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) #define __tracer_data __refdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static inline void __init disable_tracing_selftest(const char *reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* Tracers are seldom changed. Optimize when selftests are disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) #define __tracer_data __read_mostly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) #endif /* CONFIG_FTRACE_STARTUP_TEST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) extern void *head_page(struct trace_array_cpu *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) extern unsigned long long ns2usecs(u64 nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) extern int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) extern int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) trace_vprintk(unsigned long ip, const char *fmt, va_list args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) extern int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) trace_array_vprintk(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) unsigned long ip, const char *fmt, va_list args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int trace_array_printk_buf(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned long ip, const char *fmt, ...);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) void trace_printk_seq(struct trace_seq *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) enum print_line_t print_trace_line(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) extern char trace_find_mark(unsigned long long duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct ftrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct ftrace_mod_load {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) char *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) char *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) int enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) FTRACE_HASH_FL_MOD = (1 << 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct ftrace_hash {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) unsigned long size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct hlist_head *buckets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct ftrace_func_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
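/*
 * A NULL hash counts as empty.  A hash with no entries is still
 * considered non-empty while FTRACE_HASH_FL_MOD is set, since
 * module-load filters may still apply to it.
 */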
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Standard output formatting function used for function return traces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Flag options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) #define TRACE_GRAPH_PRINT_OVERRUN 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) #define TRACE_GRAPH_PRINT_CPU 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) #define TRACE_GRAPH_PRINT_PROC 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #define TRACE_GRAPH_PRINT_DURATION 0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) #define TRACE_GRAPH_PRINT_REL_TIME 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #define TRACE_GRAPH_PRINT_IRQS 0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) #define TRACE_GRAPH_PRINT_TAIL 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) #define TRACE_GRAPH_SLEEP_TIME 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #define TRACE_GRAPH_GRAPH_TIME 0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
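/*
 * The two FILL bits occupy bits 28-29 of the flag word; a fill value is
 * extracted with
 * (flags & TRACE_GRAPH_PRINT_FILL_MASK) >> TRACE_GRAPH_PRINT_FILL_SHIFT.
 */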
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) extern void ftrace_graph_sleep_time_control(bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #ifdef CONFIG_FUNCTION_PROFILER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) extern void ftrace_graph_graph_time_control(bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static inline void ftrace_graph_graph_time_control(bool enable) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) extern enum print_line_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) print_graph_function_flags(struct trace_iterator *iter, u32 flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) extern void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) extern void graph_trace_open(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) extern void graph_trace_close(struct trace_iterator *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) extern int __trace_graph_entry(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct ftrace_graph_ent *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) unsigned long flags, int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) extern void __trace_graph_return(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct ftrace_graph_ret *trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) unsigned long flags, int pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) extern struct ftrace_hash __rcu *ftrace_graph_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) unsigned long addr = trace->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Updates are protected by schedule_on_each_cpu(ftrace_sync).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (ftrace_hash_empty(hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ftrace_lookup_ip(hash, addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
		/*
		 * This needs to be cleared by the return handler when
		 * the depth is back to zero.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) trace_recursion_set(TRACE_GRAPH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) trace_recursion_set_depth(trace->depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
		/*
		 * If no irqs are to be traced, but a function in
		 * set_graph_function is called from an interrupt
		 * handler, we still want to trace it.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (in_irq())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) trace_recursion_set(TRACE_IRQ_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) trace_recursion_clear(TRACE_IRQ_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (trace_recursion_test(TRACE_GRAPH_BIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) trace->depth == trace_recursion_depth())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) trace_recursion_clear(TRACE_GRAPH_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static inline int ftrace_graph_notrace_addr(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct ftrace_hash *notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Updates are protected by schedule_on_each_cpu(ftrace_sync).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) !preemptible());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (ftrace_lookup_ip(notrace_hash, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static inline int ftrace_graph_notrace_addr(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) extern unsigned int fgraph_max_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
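/*
 * An entry is ignored when neither an already-active graph trace nor
 * set_graph_function enabled this function, when the reported depth is
 * negative, or when fgraph_max_depth is set and the depth has reached it.
 */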
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) {
	/* Trace it when it is nested in, or is itself, an enabled function. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ftrace_graph_addr(trace)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) (trace->depth < 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) (fgraph_max_depth && trace->depth >= fgraph_max_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #else /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static inline enum print_line_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) print_graph_function_flags(struct trace_iterator *iter, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return TRACE_TYPE_UNHANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) extern struct list_head ftrace_pids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) #ifdef CONFIG_FUNCTION_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #define FTRACE_PID_IGNORE -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) #define FTRACE_PID_TRACE -2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct ftrace_func_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) int (*func)(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) char *func, char *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) char *params, int enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) extern bool ftrace_filter_param __initdata;
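/*
 * The per-CPU ftrace_ignore_pid value is maintained by the PID
 * filtering code; FTRACE_PID_IGNORE means the task currently running
 * on this CPU should not be traced.
 */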
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static inline int ftrace_trace_task(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) FTRACE_PID_IGNORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) extern int ftrace_is_dead(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int ftrace_create_function_files(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct dentry *parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) void ftrace_destroy_function_files(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int ftrace_allocate_ftrace_ops(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void ftrace_free_ftrace_ops(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) void ftrace_init_global_array_ops(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) void ftrace_reset_array_ops(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) void ftrace_init_tracefs_toplevel(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct dentry *d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) void ftrace_clear_pids(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int init_function_trace(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static inline int ftrace_trace_task(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static inline int ftrace_is_dead(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ftrace_create_function_files(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct dentry *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static inline __init void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) ftrace_init_global_array_ops(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static inline void ftrace_clear_pids(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static inline int init_function_trace(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #define ftrace_init_array_ops(tr, func) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #endif /* CONFIG_FUNCTION_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
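/*
 * A function probe attaches a callback to the set of functions matched
 * by a glob (see register_ftrace_function_probe() below).  @func runs
 * when one of those functions is hit, @init and @free manage any
 * per-probe data, and @print is used when the probe is listed through
 * the filter files.
 */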
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct ftrace_probe_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) void (*func)(unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) int (*init)(struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) unsigned long ip, void *init_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) void **data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) void (*free)(struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) unsigned long ip, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) int (*print)(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct ftrace_func_mapper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) typedef int (*ftrace_mapper_func)(void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) unsigned long ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) unsigned long ip, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) unsigned long ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) ftrace_mapper_func free_func);
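/*
 * An illustrative sketch of the mapper lifecycle (not from a specific
 * caller): a probe's init() callback allocates the mapper and stores
 * per-ip data, func() looks the data up, and free() tears it down.
 *
 *	void **val;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, data);
 *	...
 *	val = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (val)
 *		data = *val;
 *	...
 *	free_ftrace_func_mapper(mapper, free_func);
 */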
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) extern int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) register_ftrace_function_probe(char *glob, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct ftrace_probe_ops *ops, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) extern int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct ftrace_probe_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) extern void clear_ftrace_function_probes(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int register_ftrace_command(struct ftrace_func_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) int unregister_ftrace_command(struct ftrace_func_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) void ftrace_create_filter_files(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct dentry *parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) void ftrace_destroy_filter_files(struct ftrace_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int len, int reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int len, int reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct ftrace_func_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
static inline __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static inline void clear_ftrace_function_probes(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
/*
 * The ops parameter passed in is usually undefined in this
 * configuration, so these must be macros rather than static inlines.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) #define ftrace_create_filter_files(ops, parent) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) #define ftrace_destroy_filter_files(ops) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) bool ftrace_event_is_function(struct trace_event_call *call);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /*
 * struct trace_parser - helper for reading user input separated by spaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * @cont: set if the input is not complete - no final space char was found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * @buffer: holds the parsed user input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * @idx: user input length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * @size: buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct trace_parser {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) bool cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) unsigned size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) static inline bool trace_parser_loaded(struct trace_parser *parser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return (parser->idx != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static inline bool trace_parser_cont(struct trace_parser *parser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return parser->cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static inline void trace_parser_clear(struct trace_parser *parser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) parser->cont = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) parser->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) extern int trace_parser_get_init(struct trace_parser *parser, int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) extern void trace_parser_put(struct trace_parser *parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) size_t cnt, loff_t *ppos);
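/*
 * An illustrative sketch of the parser lifecycle (apply_token() is a
 * hypothetical consumer, not a real helper):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		apply_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */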
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * Only create function graph options if function graph is configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) # define FGRAPH_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) C(DISPLAY_GRAPH, "display-graph"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) # define FGRAPH_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) #ifdef CONFIG_BRANCH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) # define BRANCH_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) C(BRANCH, "branch"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) # define BRANCH_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) #ifdef CONFIG_FUNCTION_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) # define FUNCTION_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) C(FUNCTION, "function-trace"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) C(FUNC_FORK, "function-fork"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) # define FUNCTION_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) # define FUNCTION_DEFAULT_FLAGS 0UL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) # define TRACE_ITER_FUNC_FORK 0UL
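/*
 * TRACE_ITER_FUNC_FORK is defined to 0UL here so that code testing the
 * flag still compiles, and always sees it clear, when the function
 * tracer is not configured in.
 */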
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #ifdef CONFIG_STACKTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) # define STACK_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) C(STACKTRACE, "stacktrace"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) # define STACK_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*
 * trace_iterator_flags is an enumeration that defines the bit
 * positions within trace_flags that control the output.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * NOTE: These bits must match the trace_options array in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * trace.c (this macro guarantees it).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #define TRACE_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) C(PRINT_PARENT, "print-parent"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) C(SYM_OFFSET, "sym-offset"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) C(SYM_ADDR, "sym-addr"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) C(VERBOSE, "verbose"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) C(RAW, "raw"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) C(HEX, "hex"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) C(BIN, "bin"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) C(BLOCK, "block"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) C(PRINTK, "trace_printk"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) C(ANNOTATE, "annotate"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) C(USERSTACKTRACE, "userstacktrace"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) C(SYM_USEROBJ, "sym-userobj"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) C(PRINTK_MSGONLY, "printk-msg-only"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) C(LATENCY_FMT, "latency-format"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) C(RECORD_CMD, "record-cmd"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) C(RECORD_TGID, "record-tgid"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) C(OVERWRITE, "overwrite"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) C(STOP_ON_FREE, "disable_on_free"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) C(IRQ_INFO, "irq-info"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) C(MARKERS, "markers"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) C(EVENT_FORK, "event-fork"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) C(PAUSE_ON_TRACE, "pause-on-trace"), \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) FUNCTION_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) FGRAPH_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) STACK_FLAGS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) BRANCH_FLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * By defining C, we can make TRACE_FLAGS a list of bit names
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * that will define the bits for the flag masks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) #undef C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) #define C(a, b) TRACE_ITER_##a##_BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) enum trace_iterator_bits {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) TRACE_FLAGS
	/* Make sure we don't use more bits than we have available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) TRACE_ITER_LAST_BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * By redefining C, we can make TRACE_FLAGS a list of masks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * use the bits as defined above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) #undef C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
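/*
 * For example, C(VERBOSE, "verbose") expanded to TRACE_ITER_VERBOSE_BIT
 * in the enum above and expands to
 * TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT) here; trace.c
 * redefines C once more to turn the same list into the "verbose" style
 * strings of the trace_options array.
 */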
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) enum trace_iterator_flags { TRACE_FLAGS };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * TRACE_ITER_SYM_MASK masks the options in trace_flags that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * control the output of kernel symbols.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) #define TRACE_ITER_SYM_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) extern struct tracer nop_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) #ifdef CONFIG_BRANCH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) extern int enable_branch_tracing(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) extern void disable_branch_tracing(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static inline int trace_branch_enable(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (tr->trace_flags & TRACE_ITER_BRANCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return enable_branch_tracing(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static inline void trace_branch_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* due to races, always disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) disable_branch_tracing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static inline int trace_branch_enable(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static inline void trace_branch_disable(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) #endif /* CONFIG_BRANCH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
/* Set the ring buffers to their default size if that has not been done yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int tracing_update_buffers(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct ftrace_event_field {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct list_head link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) const char *type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int filter_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) int is_signed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct prog_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct event_filter {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct prog_entry __rcu *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) char *filter_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct event_subsystem {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct event_filter *filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int ref_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct trace_subsystem_dir {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct event_subsystem *subsystem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct dentry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int ref_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) int nr_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct ring_buffer_event *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) void trace_buffer_unlock_commit_regs(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) unsigned long flags, int pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static inline void trace_buffer_unlock_commit(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) unsigned long flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) DECLARE_PER_CPU(int, trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) void trace_buffered_event_disable(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) void trace_buffered_event_enable(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
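/*
 * If the event being discarded is the current CPU's temporary
 * "buffered event", releasing it only requires dropping the count that
 * reserved it; anything else must be discarded through the ring buffer
 * proper.
 */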
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) __trace_event_discard_commit(struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct ring_buffer_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (this_cpu_read(trace_buffered_event) == event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /* Simply release the temp buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) this_cpu_dec(trace_buffered_event_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ring_buffer_discard_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * filtering against its fields, then they will be called as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * entry already holds the field information of the current event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * It also checks if the event should be discarded or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * It is to be discarded if the event is soft disabled and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * event was only recorded to process triggers, or if the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * filter is active and this event did not match the filters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * Returns true if the event is discarded, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) __event_trigger_test_discard(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) void *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) enum event_trigger_type *tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) unsigned long eflags = file->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (eflags & EVENT_FILE_FL_TRIGGER_COND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *tt = event_triggers_call(file, entry, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) EVENT_FILE_FL_FILTERED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) EVENT_FILE_FL_PID_FILTER))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (file->flags & EVENT_FILE_FL_FILTERED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) !filter_match_preds(file->filter, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) trace_event_ignore_this_pid(file))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) goto discard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) __trace_event_discard_commit(buffer, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * event_trigger_unlock_commit - handle triggers and finish event commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * @file: The file pointer associated with the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * @buffer: The ring buffer that the event is being written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * @event: The event meta data in the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * @entry: The event itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * @irq_flags: The state of the interrupts at the start of the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * @pc: The state of the preempt count at the start of the event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * This is a helper function to handle triggers that require data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)  * from the event itself. It also tests the event against any filters, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)  * checks whether the event is soft disabled and should be discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) event_trigger_unlock_commit(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) void *entry, unsigned long irq_flags, int pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) enum event_trigger_type tt = ETT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) event_triggers_post_call(file, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
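^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * Illustrative sketch (not the exact generated probe code) of how an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * event probe might drive this helper; "my_entry", its field and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * "event_id" are hypothetical placeholders:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	struct ring_buffer_event *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	struct trace_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	struct my_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	event = trace_event_buffer_lock_reserve(&buffer, file, event_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *						sizeof(*entry), irq_flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	entry = ring_buffer_event_data(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	entry->field = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	event_trigger_unlock_commit(file, buffer, event, entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *				    irq_flags, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  */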
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * event_trigger_unlock_commit_regs - handle triggers and finish event commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * @file: The file pointer associated with the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * @buffer: The ring buffer that the event is being written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * @event: The event meta data in the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * @entry: The event itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * @irq_flags: The state of the interrupts at the start of the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * @pc: The state of the preempt count at the start of the event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * This is a helper function to handle triggers that require data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  * from the event itself. It also tests the event against any filters, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * checks whether the event is soft disabled and should be discarded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * Same as event_trigger_unlock_commit() but calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) event_trigger_unlock_commit_regs(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct trace_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct ring_buffer_event *event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) void *entry, unsigned long irq_flags, int pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) enum event_trigger_type tt = ETT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) trace_buffer_unlock_commit_regs(file->tr, buffer, event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) irq_flags, pc, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (tt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) event_triggers_post_call(file, tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) #define FILTER_PRED_INVALID ((unsigned short)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) #define FILTER_PRED_IS_RIGHT (1 << 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) #define FILTER_PRED_FOLD (1 << 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * The maximum number of preds is limited by the size of an unsigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * short, with the two most significant bits used as flags: one bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * is shared by the IS_RIGHT and FOLD flags, the other is reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  * 2^14 preds is way more than enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) #define MAX_FILTER_PRED 16384
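^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * Resulting layout of a pred index word:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  *	bit  15   : FILTER_PRED_IS_RIGHT / FILTER_PRED_FOLD (shared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  *	bit  14   : reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  *	bits 13..0: pred index (up to MAX_FILTER_PRED - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  */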
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct filter_pred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct regex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) typedef int (*filter_pred_fn_t)(struct filter_pred *pred, void *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) typedef int (*regex_match_func)(char *str, struct regex *r, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) enum regex_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) MATCH_FULL = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) MATCH_FRONT_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) MATCH_MIDDLE_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) MATCH_END_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) MATCH_GLOB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) MATCH_INDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct regex {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) char pattern[MAX_FILTER_STR_VAL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) int field_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) regex_match_func match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct filter_pred {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) filter_pred_fn_t fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct regex regex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) unsigned short *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct ftrace_event_field *field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int not;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) int op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) };
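^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * A minimal filter_pred_fn_t sketch, modeled on the equality predicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * in trace_events_filter.c (assuming a u64 field; the function name is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * illustrative):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *	static int filter_pred_u64(struct filter_pred *pred, void *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *		u64 *addr = (u64 *)(event + pred->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *		int match = (*addr == pred->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *		return !!match == !pred->not;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  *	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  */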
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) static inline bool is_string_field(struct ftrace_event_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) return field->filter_type == FILTER_DYN_STRING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) field->filter_type == FILTER_STATIC_STRING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) field->filter_type == FILTER_PTR_STRING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) field->filter_type == FILTER_COMM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static inline bool is_function_field(struct ftrace_event_field *field)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return field->filter_type == FILTER_TRACE_FN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
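^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * filter_parse_regex() classifies simple patterns: e.g. "comm*" yields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * MATCH_FRONT_ONLY with *search == "comm", "*comm" yields MATCH_END_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * "*comm*" yields MATCH_MIDDLE_ONLY, and a leading '!' sets *not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  */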
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) extern enum regex_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) filter_parse_regex(char *buff, int len, char **search, int *not);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) extern void print_event_filter(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct trace_seq *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) extern int apply_event_filter(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) char *filter_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) char *filter_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) extern void print_subsystem_event_filter(struct event_subsystem *system,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct trace_seq *s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) extern int filter_assign_type(const char *type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) extern int create_event_filter(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct trace_event_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) char *filter_str, bool set_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct event_filter **filterp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) extern void free_event_filter(struct event_filter *filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct ftrace_event_field *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) trace_find_event_field(struct trace_event_call *call, char *name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) extern void trace_event_enable_cmd_record(bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) extern void trace_event_enable_tgid_record(bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) extern int event_trace_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) extern int event_trace_del_tracer(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) extern void __trace_early_add_events(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) extern struct trace_event_file *__find_event_file(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) const char *system,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) const char *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) extern struct trace_event_file *find_event_file(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) const char *system,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) const char *event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
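^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * Retrieve the event file data stashed in the inode's i_private.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * The READ_ONCE() pairs with the update that clears i_private when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * event file is removed; callers are expected to recheck the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  * for NULL under event_mutex before using it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  */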
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static inline void *event_file_data(struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return READ_ONCE(file_inode(filp)->i_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) extern struct mutex event_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) extern struct list_head ftrace_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) extern const struct file_operations event_trigger_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) extern const struct file_operations event_hist_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) extern const struct file_operations event_hist_debug_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) extern const struct file_operations event_inject_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) #ifdef CONFIG_HIST_TRIGGERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) extern int register_trigger_hist_cmd(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) extern int register_trigger_hist_enable_disable_cmds(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static inline int register_trigger_hist_cmd(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) extern int register_trigger_cmds(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) extern void clear_event_triggers(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct event_trigger_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) int ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) struct event_trigger_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct event_command *cmd_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct event_filter __rcu *filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) char *filter_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) void *private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) bool paused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) bool paused_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct list_head named_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct event_trigger_data *named_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /* Avoid typos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) #define ENABLE_EVENT_STR "enable_event"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) #define DISABLE_EVENT_STR "disable_event"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) #define ENABLE_HIST_STR "enable_hist"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) #define DISABLE_HIST_STR "disable_hist"
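^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  * These command names are what a user writes into an event's 'trigger'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  * file, e.g. (tracefs paths abbreviated):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  *	echo 'enable_event:sched:sched_switch' > events/.../trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  *	echo 'disable_event:sched:sched_switch:3' > events/.../trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  */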
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct enable_trigger_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct trace_event_file *file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) bool enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) bool hist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) extern int event_enable_trigger_print(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) extern void event_enable_trigger_free(struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) extern int event_enable_trigger_func(struct event_command *cmd_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) char *glob, char *cmd, char *param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) extern int event_enable_register_trigger(char *glob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) extern void event_enable_unregister_trigger(char *glob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct event_trigger_data *test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) extern void trigger_data_free(struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) extern int event_trigger_init(struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) int trigger_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) extern void update_cond_flag(struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) extern int set_trigger_filter(char *filter_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct event_trigger_data *trigger_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) extern struct event_trigger_data *find_named_trigger(const char *name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) extern bool is_named_trigger(struct event_trigger_data *test);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) extern int save_named_trigger(const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) extern void del_named_trigger(struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) extern void pause_named_trigger(struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) extern void unpause_named_trigger(struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) extern void set_named_trigger_data(struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct event_trigger_data *named_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) extern struct event_trigger_data *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) get_named_trigger_data(struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) extern int register_event_command(struct event_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) extern int unregister_event_command(struct event_command *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * struct event_trigger_ops - callbacks for trace event triggers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * The methods in this structure provide per-event trigger hooks for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * various trigger operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * All the methods below, except for @init() and @free(), must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * @func: The trigger 'probe' function called when the triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * event occurs. The data passed into this callback is the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * that was supplied to the event_command @reg() function that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * registered the trigger (see struct event_command) along with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * the trace record, rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * @init: An optional initialization function called for the trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * when the trigger is registered (via the event_command reg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * function). This can be used to perform per-trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * initialization such as incrementing a per-trigger reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * count, for instance. This is usually implemented by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * generic utility function @event_trigger_init() (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * @free: An optional de-initialization function called for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * trigger when the trigger is unregistered (via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  * event_command @unreg() function). This can be used to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * per-trigger de-initialization such as decrementing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * per-trigger reference count and freeing corresponding trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * data, for instance. This is usually implemented by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * generic utility function @event_trigger_free() (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  * trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * @print: The callback function invoked to have the trigger print
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * itself. This is usually implemented by a wrapper function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * that calls the generic utility function @event_trigger_print()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * (see trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) struct event_trigger_ops {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) void (*func)(struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) void *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct ring_buffer_event *rbe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) int (*init)(struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) void (*free)(struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) int (*print)(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct event_trigger_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) };
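^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * As an illustration, the traceon trigger in trace_events_trigger.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * wires these up roughly as follows (traceon_trigger and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * traceon_trigger_print are local to that file):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *	static struct event_trigger_ops traceon_trigger_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *		.func	= traceon_trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *		.print	= traceon_trigger_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *		.init	= event_trigger_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *		.free	= event_trigger_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  */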
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * struct event_command - callbacks and data members for event commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * Event commands are invoked by users by writing the command name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * into the 'trigger' file associated with a trace event. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * parameters associated with a specific invocation of an event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * command are used to create an event trigger instance, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * added to the list of trigger instances associated with that trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * event. When the event is hit, the set of triggers associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * that event is invoked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * The data members in this structure provide per-event command data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * for various event commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * All the data members below, except for @post_trigger, must be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * for each event command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * @name: The unique name that identifies the event command. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * the name used when setting triggers via trigger files.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * @trigger_type: A unique id that identifies the event command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * 'type'. This value has two purposes, the first to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * only one trigger of the same type can be set at a given time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  * for a particular event; e.g. it doesn't make sense to have both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * a traceon and traceoff trigger attached to a single event at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * the same time, so traceon and traceoff have the same type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * though they have different names. The @trigger_type value is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * also used as a bit value for deferring the actual trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * action until after the current event is finished. Some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * commands need to do this if they themselves log to the trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * buffer (see the @post_trigger() member below). @trigger_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * values are defined by adding new values to the trigger_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * enum in include/linux/trace_events.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * @flags: See the enum event_command_flags below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * All the methods below, except for @set_filter() and @unreg_all(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * must be implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * @func: The callback function responsible for parsing and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * registering the trigger written to the 'trigger' file by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * user. It allocates the trigger instance and registers it with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * the appropriate trace event. It makes use of the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * event_command callback functions to orchestrate this, and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * usually implemented by the generic utility function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  * @event_trigger_callback() (see trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * @reg: Adds the trigger to the list of triggers associated with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * event, and enables the event trigger itself, after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * initializing it (via the event_trigger_ops @init() function).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * This is also where commands can use the @trigger_type value to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * make the decision as to whether or not multiple instances of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * the trigger should be allowed. This is usually implemented by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * the generic utility function @register_trigger() (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)  * trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * @unreg: Removes the trigger from the list of triggers associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * with the event, and disables the event trigger itself, after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  * releasing it (via the event_trigger_ops @free() function).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * This is usually implemented by the generic utility function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * @unregister_trigger() (see trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * @unreg_all: An optional function called to remove all the triggers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * from the list of triggers associated with the event. Called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * when a trigger file is opened in truncate mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * @set_filter: An optional function called to parse and set a filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * for the trigger. If no @set_filter() method is set for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * event command, filters set by the user for the command will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * ignored. This is usually implemented by the generic utility
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  * function @set_trigger_filter() (see trace_events_trigger.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * @get_trigger_ops: The callback function invoked to retrieve the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * event_trigger_ops implementation associated with the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) struct event_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) enum event_trigger_type trigger_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) int (*func)(struct event_command *cmd_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) char *glob, char *cmd, char *params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) int (*reg)(char *glob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) void (*unreg)(char *glob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct event_trigger_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) void (*unreg_all)(struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int (*set_filter)(char *filter_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct event_trigger_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct trace_event_file *file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) };
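^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * For example, the traceon command in trace_events_trigger.c is built
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * almost entirely from the generic helpers named in the comment above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * (sketch; onoff_get_trigger_ops is local to that file):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *	static struct event_command trigger_traceon_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.name			= "traceon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.trigger_type		= ETT_TRACE_ONOFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.func			= event_trigger_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.reg			= register_trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.unreg			= unregister_trigger,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.get_trigger_ops	= onoff_get_trigger_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *		.set_filter		= set_trigger_filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  */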
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * enum event_command_flags - flags for struct event_command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * @POST_TRIGGER: A flag that says whether or not this command needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * to have its action delayed until after the current event has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * been closed. Some triggers need to avoid being invoked while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * an event is currently in the process of being logged, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * the trigger may itself log data into the trace buffer. Thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * we make sure the current event is committed before invoking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * those triggers. To do that, the trigger invocation is split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * in two - the first part checks the filter using the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * trace record; if a command has the @post_trigger flag set, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * sets a bit for itself in the return value, otherwise it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * directly invokes the trigger. Once all commands have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * either invoked or set their return flag, the current record is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * either committed or discarded. At that point, if any commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * have deferred their triggers, those commands are finally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * invoked following the close of the current event. In other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * words, if the event_trigger_ops @func() probe implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * itself logs to the trace buffer, this flag should be set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * otherwise it can be left unspecified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * @NEEDS_REC: A flag that says whether or not this command needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * access to the trace record in order to perform its function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * regardless of whether or not it has a filter associated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * it (filters make a trigger require access to the trace record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * but are not always present).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) enum event_command_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) EVENT_CMD_FL_POST_TRIGGER = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) EVENT_CMD_FL_NEEDS_REC = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static inline bool event_command_post_trigger(struct event_command *cmd_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static inline bool event_command_needs_rec(struct event_command *cmd_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) extern int trace_event_enable_disable(struct trace_event_file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) int enable, int soft_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) extern int tracing_alloc_snapshot(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) extern int tracing_snapshot_cond_disable(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) extern void *tracing_cond_snapshot_data(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) extern const char *__start___trace_bprintk_fmt[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) extern const char *__stop___trace_bprintk_fmt[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) extern const char *__start___tracepoint_str[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) extern const char *__stop___tracepoint_str[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) void trace_printk_control(bool enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) void trace_printk_start_comm(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Used from boot time tracer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) extern int trace_set_options(struct trace_array *tr, char *option);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) unsigned long size, int cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) extern int tracing_set_cpumask(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) cpumask_var_t tracing_cpumask_new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) #define MAX_EVENT_NAME_LEN 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) extern ssize_t trace_parse_run_command(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) const char __user *buffer, size_t count, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) int (*createfn)(int, char**));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) extern unsigned int err_pos(char *cmd, const char *str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) extern void tracing_log_err(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) const char *loc, const char *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) const char **errs, u8 type, u8 pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)  * Normal trace_printk() and friends allocate special buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)  * to do the manipulation, as well as save the print formats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)  * into sections for display. But the trace infrastructure wants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)  * to use these without the added overhead, at the price of being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)  * a bit slower (used mainly for warnings, where we don't care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)  * about performance). internal_trace_puts() exists for such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)  * a purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) #undef FTRACE_ENTRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) extern struct trace_event_call \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) __aligned(4) event_##call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) #undef FTRACE_ENTRY_DUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) #undef FTRACE_ENTRY_PACKED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
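^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * With the definitions above in place, including trace_entries.h below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * emits an extern declaration of a struct trace_event_call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * (event_<call>) for each entry type defined in that file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  */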
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) #include "trace_entries.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) int perf_ftrace_event_register(struct trace_event_call *call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) enum trace_reg type, void *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) #define perf_ftrace_event_register NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) #ifdef CONFIG_FTRACE_SYSCALLS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) void init_ftrace_syscalls(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) const char *get_syscall_name(int syscall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static inline void init_ftrace_syscalls(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static inline const char *get_syscall_name(int syscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) #ifdef CONFIG_EVENT_TRACING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) void trace_event_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) void trace_event_eval_update(struct trace_eval_map **map, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /* Used from boot time tracer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) extern int trigger_process_regex(struct trace_event_file *file, char *buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) static inline void __init trace_event_init(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) #ifdef CONFIG_TRACER_SNAPSHOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) void tracing_snapshot_instance(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) int tracing_alloc_snapshot_instance(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static inline void tracing_snapshot_instance(struct trace_array *tr) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) #ifdef CONFIG_PREEMPT_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) void tracer_preempt_on(unsigned long a0, unsigned long a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) void tracer_preempt_off(unsigned long a0, unsigned long a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) #ifdef CONFIG_IRQSOFF_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) extern struct trace_iterator *tracepoint_print_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  * Reset the state of the trace_iterator so that it can read consumed data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)  * Normally, the trace_iterator is used for non-consuming reads and must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)  * retain state; here everything from @seq onward is zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) const size_t offset = offsetof(struct trace_iterator, seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * Keep gcc from complaining about overwriting more than just one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * member in the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) iter->pos = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Check that the name is suitable for an event/group/field: a valid C identifier */
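^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* e.g. is_good_name("my_event1") -> true; "1bad" or "bad-name" -> false */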
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) static inline bool is_good_name(const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (!isalpha(*name) && *name != '_')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) while (*++name != '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (!isalpha(*name) && !isdigit(*name) && *name != '_')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) #endif /* _LINUX_KERNEL_TRACE_H */