Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * ring buffer based function tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Based on code from the latency_tracer, that is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *  Copyright (C) 2004-2006 Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *  Copyright (C) 2004 Nadia Yvette Chambers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/ring_buffer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include "trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) static void tracing_start_function_trace(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) static void tracing_stop_function_trace(struct trace_array *tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) function_trace_call(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 		    struct ftrace_ops *op, struct pt_regs *pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 			  struct ftrace_ops *op, struct pt_regs *pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) static struct tracer_flags func_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) /* Our option */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	TRACE_FUNC_OPT_STACK	= 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) int ftrace_allocate_ftrace_ops(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	/* The top level array uses the "global_ops" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	/* Currently only the non stack version is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	ops->func = function_trace_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	tr->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	ops->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) void ftrace_free_ftrace_ops(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	kfree(tr->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	tr->ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
/*
 * Create the function tracer's filter control files for a trace
 * instance under @parent.
 *
 * Returns 0 on success (or for the top level array, whose files
 * already exist), -EINVAL if the instance has no ftrace_ops (i.e. its
 * allocation failed at instance creation).
 */
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/*
 * Tear down what ftrace_create_function_files() set up.
 * Order matters: the filter files reference tr->ops, so they must be
 * destroyed before the ops are freed.
 */
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
/*
 * tracer ->init callback: start function tracing on @tr.
 * Picks the plain or the stack-recording callback, installs it on the
 * instance's ops, then enables cmdline recording and registers the
 * callback with ftrace.
 *
 * Returns -ENOMEM if the instance's ops allocation had failed at
 * instance creation, 0 otherwise.
 */
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	/*
	 * NOTE(review): records the current CPU id in the buffer and
	 * immediately re-enables preemption — presumably just seeding a
	 * valid CPU id; confirm against array_buffer users in trace.c.
	 */
	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
/*
 * tracer ->reset callback: undo function_trace_init() in reverse
 * order — unregister the callback, stop cmdline recording, restore
 * the instance's original ops function.
 */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
/* tracer ->start callback: clear the instance's per-CPU ring buffers. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
/*
 * The ftrace callback for the "function" tracer: record one function
 * entry per traced call.  This can fire from almost any context, so
 * preemption is disabled for the duration and a recursion bit guards
 * against the callback re-entering itself through anything it calls.
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/* Sample preempt_count before disabling preemption bumps it. */
	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	/* Skip recording while this CPU's data is marked disabled. */
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #ifdef CONFIG_UNWINDER_ORC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * Skip 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *   function_stack_trace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *   ftrace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define STACK_SKIP 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  * Skip 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  *   __trace_stack()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  *   function_stack_trace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)  *   ftrace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) #define STACK_SKIP 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
/*
 * Like function_trace_call(), but additionally records a stack trace
 * (skipping STACK_SKIP ftrace-machinery frames).  Instead of the
 * recursion bit it disables interrupts and uses the per-CPU disabled
 * counter: only the outermost entry (disabled == 1) records anything.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
/* Options exposed in trace_options for the function tracer. */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

/* Current option state; toggled through func_set_flag(). */
static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
/*
 * Register the instance's ops with ftrace and enable the callback.
 * function_enabled is cleared first so the callback — which may fire
 * the moment the ops are registered — stays a nop until registration
 * has fully completed.
 */
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 
/* Disable the callback first, then unregister the ops from ftrace. */
static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static struct tracer function_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	switch (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	case TRACE_FUNC_OPT_STACK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		/* do nothing if already set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 		/* We can change this flag when not running. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		if (tr->current_trace != &function_trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 		unregister_ftrace_function(tr->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 		if (set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 			tr->ops->func = function_stack_trace_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 			register_ftrace_function(tr->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 			tr->ops->func = function_trace_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 			register_ftrace_function(tr->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
/* The "function" tracer registered with the tracing core. */
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static void update_traceon_count(struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 				 unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 				 struct trace_array *tr, bool on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 				 void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	long *count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	long old_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 	 * Tracing gets disabled (or enabled) once per count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	 * This function can be called at the same time on multiple CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	 * It is fine if both disable (or enable) tracing, as disabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	 * (or enabling) the second time doesn't do anything as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	 * state of the tracer is already disabled (or enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	 * What needs to be synchronized in this case is that the count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	 * only gets decremented once, even if the tracer is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	 * (or enabled) twice, as the second one is really a nop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	 * The memory barriers guarantee that we only decrement the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	 * counter once. First the count is read to a local variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	 * and a read barrier is used to make sure that it is loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	 * before checking if the tracer is in the state we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	 * If the tracer is not in the state we want, then the count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	 * is guaranteed to be the old count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 	 * Next the tracer is set to the state we want (disabled or enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	 * then a write memory barrier is used to make sure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	 * the new state is visible before changing the counter by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	 * one minus the old counter. This guarantees that another CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 	 * executing this code will see the new state before seeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	 * the new counter value, and would not do anything if the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	 * counter is seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	 * Note, there is no synchronization between this and a user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	 * setting the tracing_on file. But we currently don't care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 	 * about that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	old_count = *count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	if (old_count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	/* Make sure we see count before checking tracing state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 	smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	if (on == !!tracer_tracing_is_on(tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 	if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 		tracer_tracing_on(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 		tracer_tracing_off(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	/* Make sure tracing state is visible before updating count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	*count = old_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 		     struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 		     void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	update_traceon_count(ops, ip, tr, 1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		      void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	update_traceon_count(ops, ip, tr, 0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
/* Probe for unbounded "traceon": switch tracing on if it is off. */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (!tracer_tracing_is_on(tr))
		tracer_tracing_on(tr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
/* Probe for unbounded "traceoff": switch tracing off if it is on. */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (tracer_tracing_is_on(tr))
		tracer_tracing_off(tr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) #ifdef CONFIG_UNWINDER_ORC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)  * Skip 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)  *   function_trace_probe_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)  *   ftrace_ops_assist_func()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)  *   ftrace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) #define FTRACE_STACK_SKIP 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)  * Skip 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)  *   __trace_stack()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)  *   ftrace_stacktrace()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)  *   function_trace_probe_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)  *   ftrace_ops_assist_func()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)  *   ftrace_call()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) #define FTRACE_STACK_SKIP 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 
/*
 * Record a stack trace into @tr's buffer, skipping FTRACE_STACK_SKIP
 * frames of ftrace machinery so the trace starts at the probed
 * function's caller.
 */
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 
/* Probe for unbounded "stacktrace": record a stack trace every hit. */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 			struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 			void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 	struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	long *count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	long old_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	long new_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	if (!tracing_is_on())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	/* unlimited? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	if (!mapper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		trace_stack(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	 * Stack traces should only execute the number of times the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	 * user specified in the counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		old_count = *count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		if (!old_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 		new_count = old_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		new_count = cmpxchg(count, old_count, new_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		if (new_count == old_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 			trace_stack(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 		if (!tracing_is_on())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	} while (new_count != old_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 			void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	long *count = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	if (mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		if (*count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		(*count)--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		  struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		  void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	if (update_count(ops, ip, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 		ftrace_dump(DUMP_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /* Only dump the current CPU buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		     struct trace_array *tr, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		     void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	if (update_count(ops, ip, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		ftrace_dump(DUMP_ORIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) ftrace_probe_print(const char *name, struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		   unsigned long ip, struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		   void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	long *count = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	seq_printf(m, "%ps:%s", (void *)ip, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	if (mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		seq_printf(m, ":count=%ld\n", *count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		seq_puts(m, ":unlimited\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) ftrace_traceon_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		     struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		     void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	return ftrace_probe_print("traceon", m, ip, ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 			 struct ftrace_probe_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	return ftrace_probe_print("traceoff", m, ip, ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 			struct ftrace_probe_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	return ftrace_probe_print("stacktrace", m, ip, ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) ftrace_dump_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 			struct ftrace_probe_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	return ftrace_probe_print("dump", m, ip, ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			struct ftrace_probe_ops *ops, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	return ftrace_probe_print("cpudump", m, ip, ops, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		  unsigned long ip, void *init_data, void **data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	struct ftrace_func_mapper *mapper = *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	if (!mapper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		mapper = allocate_ftrace_func_mapper();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		if (!mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		*data = mapper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		  unsigned long ip, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	struct ftrace_func_mapper *mapper = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	if (!ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		free_ftrace_func_mapper(mapper, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	ftrace_func_mapper_remove_ip(mapper, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static struct ftrace_probe_ops traceon_count_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	.func			= ftrace_traceon_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	.print			= ftrace_traceon_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	.init			= ftrace_count_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	.free			= ftrace_count_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) static struct ftrace_probe_ops traceoff_count_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	.func			= ftrace_traceoff_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	.print			= ftrace_traceoff_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	.init			= ftrace_count_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	.free			= ftrace_count_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) static struct ftrace_probe_ops stacktrace_count_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	.func			= ftrace_stacktrace_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	.print			= ftrace_stacktrace_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	.init			= ftrace_count_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	.free			= ftrace_count_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static struct ftrace_probe_ops dump_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	.func			= ftrace_dump_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	.print			= ftrace_dump_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	.init			= ftrace_count_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	.free			= ftrace_count_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static struct ftrace_probe_ops cpudump_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	.func			= ftrace_cpudump_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	.print			= ftrace_cpudump_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static struct ftrace_probe_ops traceon_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	.func			= ftrace_traceon,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	.print			= ftrace_traceon_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) static struct ftrace_probe_ops traceoff_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	.func			= ftrace_traceoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	.print			= ftrace_traceoff_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) static struct ftrace_probe_ops stacktrace_probe_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	.func			= ftrace_stacktrace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	.print			= ftrace_stacktrace_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) ftrace_trace_probe_callback(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 			    struct ftrace_probe_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 			    struct ftrace_hash *hash, char *glob,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 			    char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	void *count = (void *)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	char *number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	/* hash funcs only work with set_ftrace_filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	if (!enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	if (glob[0] == '!')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	if (!param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 		goto out_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	number = strsep(&param, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	if (!strlen(number))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 		goto out_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	 * We use the callback data field (which is a pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	 * as our counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	ret = kstrtoul(number, 0, (unsigned long *)&count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)  out_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	ret = register_ftrace_function_probe(glob, tr, ops, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	return ret < 0 ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 			    char *glob, char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	struct ftrace_probe_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	/* we register both traceon and traceoff to this callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (strcmp(cmd, "traceon") == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 					   param, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 			   char *glob, char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	struct ftrace_probe_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 					   param, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 			   char *glob, char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	struct ftrace_probe_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	ops = &dump_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	/* Only dump once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 					   "1", enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 			   char *glob, char *cmd, char *param, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	struct ftrace_probe_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	ops = &cpudump_probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	/* Only dump once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 					   "1", enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) static struct ftrace_func_command ftrace_traceon_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	.name			= "traceon",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 	.func			= ftrace_trace_onoff_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static struct ftrace_func_command ftrace_traceoff_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	.name			= "traceoff",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	.func			= ftrace_trace_onoff_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static struct ftrace_func_command ftrace_stacktrace_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	.name			= "stacktrace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	.func			= ftrace_stacktrace_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static struct ftrace_func_command ftrace_dump_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	.name			= "dump",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	.func			= ftrace_dump_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) static struct ftrace_func_command ftrace_cpudump_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	.name			= "cpudump",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 	.func			= ftrace_cpudump_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static int __init init_func_cmd_traceon(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	ret = register_ftrace_command(&ftrace_traceoff_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	ret = register_ftrace_command(&ftrace_traceon_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 		goto out_free_traceoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		goto out_free_traceon;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	ret = register_ftrace_command(&ftrace_dump_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		goto out_free_stacktrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	ret = register_ftrace_command(&ftrace_cpudump_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		goto out_free_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)  out_free_dump:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	unregister_ftrace_command(&ftrace_dump_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)  out_free_stacktrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	unregister_ftrace_command(&ftrace_stacktrace_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)  out_free_traceon:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	unregister_ftrace_command(&ftrace_traceon_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)  out_free_traceoff:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	unregister_ftrace_command(&ftrace_traceoff_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static inline int init_func_cmd_traceon(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) __init int init_function_trace(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	init_func_cmd_traceon();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	return register_tracer(&function_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }