Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Infrastructure for profiling code inserted by 'gcc -pg'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Originally ported from the -rt patch by:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * Based on code in the latency_tracer, that is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *  Copyright (C) 2004-2006 Ingo Molnar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *  Copyright (C) 2004 Nadia Yvette Chambers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/stop_machine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/clocksource.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/kallsyms.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/tracefs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/hardirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/bsearch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/sysctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/hash.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/rcupdate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <trace/events/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <asm/sections.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include "ftrace_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include "trace_output.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include "trace_stat.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
/*
 * FTRACE_WARN_ON(cond) - warn and shut down ftrace on an anomaly.
 *
 * Evaluates @cond once; if true, emits a WARN_ON() and calls
 * ftrace_kill() so tracing stops before an inconsistent state can do
 * further damage (see the ftrace_disabled comment below).  The
 * statement expression yields the evaluated condition, so the macro
 * can itself be used inside an if ().
 */
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
/*
 * Like FTRACE_WARN_ON(), but the warning is printed only the first
 * time the condition triggers.  ftrace_kill() is still called on
 * every triggering evaluation.
 */
#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Static initializer for the filter-hash state embedded in a struct
 * ftrace_ops: points func_hash at the ops' own local_hash and
 * statically initializes its regex_lock mutex.  Without
 * CONFIG_DYNAMIC_FTRACE the ops carry no hash state, so the macro
 * expands to nothing.
 */
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
/*
 * Flag bits describing a code-modification pass.  Their consumers are
 * not in this chunk; presumably ENABLE marks a pass that turns tracing
 * on and MAY_SLEEP marks one running in a sleepable context — verify
 * against the users elsewhere in this file.
 */
enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
/*
 * Sentinel ops that terminates every ftrace_ops list (the lists end in
 * this stub rather than NULL — see remove_ftrace_ops()).  Its callback
 * is ftrace_stub and the STUB flag marks it as the end marker.
 */
struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* Previous value of ftrace_enabled (its consumer is outside this chunk) */
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) static bool ftrace_pids_enabled(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	tr = ops->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	return tr->function_pids != NULL || tr->function_no_pids != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* Serializes updates to ftrace_ops_list and the active trace function */
DEFINE_MUTEX(ftrace_lock);

/* RCU-protected list of registered ops, terminated by ftrace_list_end */
struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
/* The callback the 'gcc -pg' trampoline ultimately invokes */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* Default ops (definition is outside this chunk) */
struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 
/*
 * Lazily initialize the dynamic-ftrace hash state of @ops the first
 * time it is seen: set up the regex_lock mutex, point func_hash at the
 * ops' own local_hash, and mark the ops INITIALIZED so this runs only
 * once.  A no-op without CONFIG_DYNAMIC_FTRACE.
 */
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
		return;

	mutex_init(&ops->local_hash.regex_lock);
	ops->func_hash = &ops->local_hash;
	ops->flags |= FTRACE_OPS_FL_INITIALIZED;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
/*
 * Callback installed in place of ops->func when PID filtering is
 * active (see __register_ftrace_function() / ftrace_update_pid_func()).
 * Consults the per-cpu ftrace_ignore_pid value of the ops' trace array
 * and forwards to the saved callback only when the current task should
 * be traced.
 */
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		/* This CPU's current task is explicitly filtered out */
		if (pid == FTRACE_PID_IGNORE)
			return;
		/*
		 * Anything other than FTRACE_PID_TRACE ("trace regardless
		 * of pid") must match the current task's pid.
		 */
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, regs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
/*
 * IPI callback used by update_ftrace_function() to force a read
 * barrier on every CPU after function_trace_op has been switched.
 */
static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	 * then it needs to call the list anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 	    FTRACE_FORCE_LIST_FUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		return ftrace_ops_list_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 	return ftrace_ops_get_func(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 
/*
 * Recompute which callback the trampoline invokes
 * (ftrace_trace_function) and which ops the arch callback uses
 * (function_trace_op) from the current ftrace_ops_list.  Must be
 * called with ftrace_lock held — asserted by the
 * rcu_dereference_protected() calls below.
 */
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes affect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 
/*
 * Insert @ops at the head of an RCU-protected ops list.  Writers are
 * serialized by ftrace_lock; the two rcu_assign_pointer() calls order
 * the stores so a lockless reader never sees @ops on the list before
 * ops->next is valid.
 */
static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
/*
 * Unlink @ops from an RCU-protected ops list.  Returns 0 on success or
 * -1 if @ops is not on the list.  Must be called with ftrace_lock held
 * (asserted by the rcu_dereference_protected() calls).  Note the list
 * is never NULL-terminated: it always ends in ftrace_list_end.
 */
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	/* Otherwise walk the list looking for the entry to unlink */
	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) static void ftrace_update_trampoline(struct ftrace_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
/*
 * __register_ftrace_function - add @ops to the global ftrace_ops_list
 *
 * Validates the ops' flags, links it onto ftrace_ops_list, installs
 * the pid-filter wrapper if needed and, when tracing is enabled,
 * refreshes the active trace function.  Returns 0 on success or a
 * negative errno.  Callers serialize via ftrace_lock.
 */
int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	/* Registering an already-enabled ops is a caller bug */
	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	/* A PERMANENT ops cannot be registered while ftrace is turned off */
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	/* Ops outside core kernel data (e.g. module-owned) must be DYNAMIC */
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
/*
 * __unregister_ftrace_function - remove @ops from ftrace_ops_list
 *
 * Reverses __register_ftrace_function(): unlinks the ops, refreshes
 * the active trace function if tracing is enabled, and restores the
 * callback saved at registration time.  Returns 0 on success, -EBUSY
 * if the ops was never enabled, or the error from remove_ftrace_ops().
 */
int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/* Undo any ftrace_pid_func redirection done at registration */
	ops->func = ops->saved_func;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 
/*
 * Re-evaluate PID filtering for every registered ops: install
 * ftrace_pid_func where filtering is now enabled, restore the saved
 * callback where it is not, then refresh the active trace function.
 */
static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
#ifdef CONFIG_FUNCTION_PROFILER
/* One profiled function: hashed by ip, with a hit count and timings */
struct ftrace_profile {
	struct hlist_node		node;		/* entry in the profile hash */
	unsigned long			ip;		/* profiled function address */
	unsigned long			counter;	/* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;		/* accumulated time */
	unsigned long long		time_squared;	/* sum of squared times (presumably for variance) */
#endif
};

/* A page-sized chunk of profile records, chained into a list */
struct ftrace_profile_page {
	struct ftrace_profile_page	*next;		/* next page in the chain */
	unsigned long			index;		/* records in use on this page */
	struct ftrace_profile		records[];	/* fills the rest of the page */
};

/* Per-cpu profiler state (see ftrace_profile_stats below) */
struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;		/* ip -> ftrace_profile lookup */
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;		/* first page; iteration starts here */
	struct tracer_stat		stat;		/* hook into the trace_stat framework */
};

/* Bytes available for records within one ftrace_profile_page */
#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

/* Number of records that fit on one page */
#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
/*
 * Stat iterator step: given the current record @v, return the next
 * profile record, or NULL when the last page has been consumed.
 * @idx == 0 means "return the current record itself" (used by
 * function_stat_start()).
 *
 * The containing page is recovered by masking the record's address
 * with PAGE_MASK — this relies on each ftrace_profile_page being
 * page-aligned.
 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	/* Ran off the used portion of this page: move to the next page */
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		/* Skip the new page's first record if it was never hit */
		if (!rec->counter)
			goto again;
	}

	return rec;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) static void *function_stat_start(struct tracer_stat *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct ftrace_profile_stat *stat =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		container_of(trace, struct ftrace_profile_stat, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	if (!stat || !stat->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	return function_stat_next(&stat->start->records[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) /* function graph compares on total time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) static int function_stat_cmp(const void *p1, const void *p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	const struct ftrace_profile *a = p1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	const struct ftrace_profile *b = p2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	if (a->time < b->time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	if (a->time > b->time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) /* not function graph compares against hits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) static int function_stat_cmp(const void *p1, const void *p2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	const struct ftrace_profile *a = p1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	const struct ftrace_profile *b = p2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (a->counter < b->counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (a->counter > b->counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) static int function_stat_headers(struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	seq_puts(m, "  Function                               "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		 "Hit    Time            Avg             s^2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		    "  --------                               "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		 "---    ----            ---             ---\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	seq_puts(m, "  Function                               Hit\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		    "  --------                               ---\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) static int function_stat_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	struct ftrace_profile *rec = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	char str[KSYM_SYMBOL_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	static struct trace_seq s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	unsigned long long avg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	unsigned long long stddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	mutex_lock(&ftrace_profile_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	/* we raced with function_profile_reset() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	if (unlikely(rec->counter == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	avg = div64_ul(rec->time, rec->counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	if (tracing_thresh && (avg < tracing_thresh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	seq_puts(m, "    ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	/* Sample standard deviation (s^2) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if (rec->counter <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		stddev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		 * Apply Welford's method:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		stddev = rec->counter * rec->time_squared -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			 rec->time * rec->time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		 * Divide only 1000 for ns^2 -> us^2 conversion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		 * trace_print_graph_duration will divide 1000 again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		stddev = div64_ul(stddev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 				  rec->counter * (rec->counter - 1) * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	trace_seq_init(&s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	trace_print_graph_duration(rec->time, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	trace_seq_puts(&s, "    ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	trace_print_graph_duration(avg, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	trace_seq_puts(&s, "    ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	trace_print_graph_duration(stddev, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	trace_print_seq(m, &s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	mutex_unlock(&ftrace_profile_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	struct ftrace_profile_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	pg = stat->pages = stat->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	while (pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 		pg->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	memset(stat->hash, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	struct ftrace_profile_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	int functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	/* If we already allocated, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	if (stat->pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	if (!stat->pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	functions = ftrace_update_tot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	 * We do not know the number of functions that exist because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	 * dynamic tracing is what counts them. With past experience
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	 * we have around 20K functions. That should be more than enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 * It is highly unlikely we will execute every function in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	functions = 20000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	pg = stat->start = stat->pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	for (i = 1; i < pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		if (!pg->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	pg = stat->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	while (pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		unsigned long tmp = (unsigned long)pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		free_page(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	stat->pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	stat->start = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) static int ftrace_profile_init_cpu(int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	struct ftrace_profile_stat *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	stat = &per_cpu(ftrace_profile_stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	if (stat->hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		/* If the profile is already created, simply reset it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		ftrace_profile_reset(stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 * We are profiling all functions, but usually only a few thousand
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	 * functions are hit. We'll make a hash of 1024 items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	size = FTRACE_PROFILE_HASH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (!stat->hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	/* Preallocate the function profiling pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	if (ftrace_profile_pages_init(stat) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		kfree(stat->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		stat->hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) static int ftrace_profile_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		ret = ftrace_profile_init_cpu(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) /* interrupts must be disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) static struct ftrace_profile *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	struct ftrace_profile *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	unsigned long key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	hhd = &stat->hash[key];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (hlist_empty(hhd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		if (rec->ip == ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			return rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) static void ftrace_add_profile(struct ftrace_profile_stat *stat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 			       struct ftrace_profile *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	unsigned long key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  * The memory is already allocated, this simply finds a new record to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static struct ftrace_profile *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct ftrace_profile *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	/* prevent recursion (from NMIs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	if (atomic_inc_return(&stat->disabled) != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * Try to find the function again since an NMI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * could have added it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	rec = ftrace_find_profiled_func(stat, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	if (stat->pages->index == PROFILES_PER_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		if (!stat->pages->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		stat->pages = stat->pages->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	rec = &stat->pages->records[stat->pages->index++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	rec->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	ftrace_add_profile(stat, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	atomic_dec(&stat->disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	return rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) function_profile_call(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		      struct ftrace_ops *ops, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct ftrace_profile_stat *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	struct ftrace_profile *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (!ftrace_profile_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	stat = this_cpu_ptr(&ftrace_profile_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	if (!stat->hash || !ftrace_profile_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	rec = ftrace_find_profiled_func(stat, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	if (!rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		rec = ftrace_profile_alloc(stat, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	rec->counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) static bool fgraph_graph_time = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) void ftrace_graph_graph_time_control(bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	fgraph_graph_time = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) static int profile_graph_entry(struct ftrace_graph_ent *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct ftrace_ret_stack *ret_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	function_profile_call(trace->func, 0, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	/* If function graph is shutting down, ret_stack can be NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	if (!current->ret_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	ret_stack = ftrace_graph_get_ret_stack(current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	if (ret_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		ret_stack->subtime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) static void profile_graph_return(struct ftrace_graph_ret *trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct ftrace_ret_stack *ret_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	struct ftrace_profile_stat *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	unsigned long long calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct ftrace_profile *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	stat = this_cpu_ptr(&ftrace_profile_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	if (!stat->hash || !ftrace_profile_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	/* If the calltime was zero'd ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if (!trace->calltime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	calltime = trace->rettime - trace->calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (!fgraph_graph_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		/* Append this call time to the parent time to subtract */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		ret_stack = ftrace_graph_get_ret_stack(current, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		if (ret_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			ret_stack->subtime += calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		ret_stack = ftrace_graph_get_ret_stack(current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (ret_stack && ret_stack->subtime < calltime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			calltime -= ret_stack->subtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			calltime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	rec = ftrace_find_profiled_func(stat, trace->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		rec->time += calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		rec->time_squared += calltime * calltime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) static struct fgraph_ops fprofiler_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	.entryfunc = &profile_graph_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	.retfunc = &profile_graph_return,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static int register_ftrace_profiler(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	return register_ftrace_graph(&fprofiler_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) static void unregister_ftrace_profiler(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	unregister_ftrace_graph(&fprofiler_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) static struct ftrace_ops ftrace_profile_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	.func		= function_profile_call,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	INIT_OPS_HASH(ftrace_profile_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) static int register_ftrace_profiler(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return register_ftrace_function(&ftrace_profile_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static void unregister_ftrace_profiler(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	unregister_ftrace_function(&ftrace_profile_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) ftrace_profile_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		     size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	val = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	mutex_lock(&ftrace_profile_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (ftrace_profile_enabled ^ val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			ret = ftrace_profile_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				cnt = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 			ret = register_ftrace_profiler();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 				cnt = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			ftrace_profile_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			ftrace_profile_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			 * unregister_ftrace_profiler calls stop_machine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			 * so this acts like an synchronize_rcu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			unregister_ftrace_profiler();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	mutex_unlock(&ftrace_profile_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	*ppos += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) ftrace_profile_read(struct file *filp, char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		     size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	char buf[64];		/* big enough to hold a number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static const struct file_operations ftrace_profile_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	.open		= tracing_open_generic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	.read		= ftrace_profile_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	.write		= ftrace_profile_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	.llseek		= default_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) /* used to initialize the real stat files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) static struct tracer_stat function_stats __initdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	.name		= "functions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	.stat_start	= function_stat_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	.stat_next	= function_stat_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	.stat_cmp	= function_stat_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	.stat_headers	= function_stat_headers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	.stat_show	= function_stat_show
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct ftrace_profile_stat *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	struct dentry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		stat = &per_cpu(ftrace_profile_stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		name = kasprintf(GFP_KERNEL, "function%d", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			 * The files created are permanent, if something happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			 * we still do not free memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			WARN(1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			     "Could not allocate stat file for cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			     cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		stat->stat = function_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		stat->stat.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		ret = register_stat_tracer(&stat->stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			WARN(1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			     "Could not register function stat for cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			     cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	entry = tracefs_create_file("function_profile_enabled", 0644,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				    d_tracer, NULL, &ftrace_profile_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) #else /* CONFIG_FUNCTION_PROFILER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #endif /* CONFIG_FUNCTION_PROFILER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #ifdef CONFIG_DYNAMIC_FTRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static struct ftrace_ops *removed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * Set when doing a global update, like enabling all recs or disabling them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * It is not set when just updating a single ftrace_ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static bool update_all_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #ifndef CONFIG_FTRACE_MCOUNT_RECORD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) # error Dynamic ftrace depends on MCOUNT_RECORD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct ftrace_func_probe {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	struct ftrace_probe_ops	*probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	struct ftrace_ops	ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	struct trace_array	*tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	void			*data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	int			ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * We make these constant because no one should touch them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * but they are used as the default "empty hash", to avoid allocating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * it all the time. These are in a read only section such that if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * anyone does try to modify it, it will cause an exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static const struct hlist_head empty_buckets[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static const struct ftrace_hash empty_hash = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	.buckets = (struct hlist_head *)empty_buckets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct ftrace_ops global_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	.func				= ftrace_stub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	.local_hash.notrace_hash	= EMPTY_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	.local_hash.filter_hash		= EMPTY_HASH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	INIT_OPS_HASH(global_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 					  FTRACE_OPS_FL_INITIALIZED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 					  FTRACE_OPS_FL_PID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  * Used by the stack undwinder to know about dynamic ftrace trampolines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct ftrace_ops *op = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	 * Some of the ops may be dynamically allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	 * they are freed after a synchronize_rcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		 * This is to check for dynamically allocated trampolines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		 * Trampolines that are in kernel text will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		 * core_kernel_text() return true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		if (op->trampoline && op->trampoline_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			if (addr >= op->trampoline &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			    addr < op->trampoline + op->trampoline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  * This is used by __kernel_text_address() to return true if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  * address is on a dynamically allocated trampoline that would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * not return true for either core_kernel_text() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * is_module_text_address().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) bool is_ftrace_trampoline(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	return ftrace_ops_trampoline(addr) != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct ftrace_page {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct ftrace_page	*next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct dyn_ftrace	*records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	int			index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	int			size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #define ENTRY_SIZE sizeof(struct dyn_ftrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static struct ftrace_page	*ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static struct ftrace_page	*ftrace_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) static __always_inline unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (hash->size_bits > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		return hash_long(ip, hash->size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* Only use this function if ftrace_hash_empty() has already been tested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static __always_inline struct ftrace_func_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	unsigned long key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	key = ftrace_hash_key(hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	hhd = &hash->buckets[key];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		if (entry->ip == ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  * @hash: The hash to look at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  * @ip: The instruction pointer to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * Search a given @hash to see if a given instruction pointer (@ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * exists in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  * Returns the entry that holds the @ip if found. NULL otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct ftrace_func_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	return __ftrace_lookup_ip(hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void __add_hash_entry(struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			     struct ftrace_func_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	unsigned long key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	key = ftrace_hash_key(hash, entry->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	hhd = &hash->buckets[key];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	hlist_add_head(&entry->hlist, hhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	hash->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	__add_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) free_hash_entry(struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		  struct ftrace_func_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	hlist_del(&entry->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	hash->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) remove_hash_entry(struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		  struct ftrace_func_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	hlist_del_rcu(&entry->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	hash->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static void ftrace_hash_clear(struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	struct hlist_node *tn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	int size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (!hash->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		hhd = &hash->buckets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			free_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	FTRACE_WARN_ON(hash->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	list_del(&ftrace_mod->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	kfree(ftrace_mod->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	kfree(ftrace_mod->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	kfree(ftrace_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static void clear_ftrace_mod_list(struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	struct ftrace_mod_load *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	/* stack tracer isn't supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (!head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	list_for_each_entry_safe(p, n, head, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		free_ftrace_mod(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void free_ftrace_hash(struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (!hash || hash == EMPTY_HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ftrace_hash_clear(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	kfree(hash->buckets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	kfree(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	hash = container_of(rcu, struct ftrace_hash, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	free_ftrace_hash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (!hash || hash == EMPTY_HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) void ftrace_free_filter(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	free_ftrace_hash(ops->func_hash->filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	free_ftrace_hash(ops->func_hash->notrace_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (!hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	size = 1 << size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	if (!hash->buckets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		kfree(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	hash->size_bits = size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	return hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static int ftrace_add_mod(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 			  const char *func, const char *module,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			  int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct ftrace_mod_load *ftrace_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (!ftrace_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	ftrace_mod->enable = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (!ftrace_mod->func || !ftrace_mod->module)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	list_add(&ftrace_mod->list, mod_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	free_ftrace_mod(ftrace_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static struct ftrace_hash *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct ftrace_hash *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	new_hash = alloc_ftrace_hash(size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	if (!new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		new_hash->flags = hash->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	/* Empty hash? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			ret = add_hash_entry(new_hash, entry->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 				goto free_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	FTRACE_WARN_ON(new_hash->count != hash->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	return new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  free_hash:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	free_ftrace_hash(new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				       struct ftrace_hash *new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct ftrace_hash *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	struct hlist_node *tn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	int bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	 * Use around half the size (max bit of it), but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	bits = fls(size / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	/* Don't allocate too much */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	if (bits > FTRACE_HASH_MAX_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		bits = FTRACE_HASH_MAX_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	new_hash = alloc_ftrace_hash(bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if (!new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	new_hash->flags = src->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	size = 1 << src->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		hhd = &src->buckets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 			remove_hash_entry(src, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			__add_hash_entry(new_hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	return new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static struct ftrace_hash *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) __ftrace_hash_move(struct ftrace_hash *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	int size = src->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	 * If the new source is empty, just return the empty_hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	if (ftrace_hash_empty(src))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		return EMPTY_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	return dup_hash(src, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ftrace_hash_move(struct ftrace_ops *ops, int enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		 struct ftrace_hash **dst, struct ftrace_hash *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct ftrace_hash *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	new_hash = __ftrace_hash_move(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (!new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		/* IPMODIFY should be updated only when filter_hash updating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			free_ftrace_hash(new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	 * Remove the current set, update the hash and add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	 * them back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	ftrace_hash_rec_disable_modify(ops, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	rcu_assign_pointer(*dst, new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	ftrace_hash_rec_enable_modify(ops, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static bool hash_contains_ip(unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			     struct ftrace_ops_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * The function record is a match if it exists in the filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * hash and not in the notrace hash. Note, an empty hash is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 * considered a match for the filter hash, but an empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 * notrace hash is considered not in the notrace hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	return (ftrace_hash_empty(hash->filter_hash) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		(ftrace_hash_empty(hash->notrace_hash) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)  * Test the hashes for this ops to see if we want to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)  * the ops->func or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)  * It's a match if the ip is in the ops->filter_hash or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * the filter_hash does not exist or is empty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  *  AND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  * the ip is not in the ops->notrace_hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  * This needs to be called with preemption disabled as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  * the hashes are freed with call_rcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct ftrace_ops_hash hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	 * There's a small race when adding ops that the ftrace handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	 * that wants regs, may be called without them. We can not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	 * allow that handler to be called if regs is NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (hash_contains_ip(ip, &hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * This is a double for. Do not use 'break' to break out of the loop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  * you must use a goto.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) #define do_for_each_ftrace_rec(pg, rec)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		int _____i;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		for (_____i = 0; _____i < pg->index; _____i++) {	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			rec = &pg->records[_____i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) #define while_for_each_ftrace_rec()		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		}				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) static int ftrace_cmp_recs(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	const struct dyn_ftrace *key = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	const struct dyn_ftrace *rec = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (key->flags < rec->ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct dyn_ftrace *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	struct dyn_ftrace key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	key.ip = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	key.flags = end;	/* overload flags, as it is unsigned long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		if (end < pg->records[0].ip ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		rec = bsearch(&key, pg->records, pg->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 			      sizeof(struct dyn_ftrace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			      ftrace_cmp_recs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		if (rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	return rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  * ftrace_location_range - return the first address of a traced location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  *	if it touches the given ip range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  * @start: start of range to search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)  * @end: end of range to search (inclusive). @end points to the last byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  *	to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * Returns rec->ip if the related ftrace location is a least partly within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * the given address range. That is, the first address of the instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * that is either a NOP or call to the function tracer. It checks the ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  * internal tables to determine if the address belongs or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) unsigned long ftrace_location_range(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	rec = lookup_rec(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		return rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * ftrace_location - return true if the ip giving is a traced location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * @ip: the instruction pointer to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  * Returns rec->ip if @ip given is a pointer to a ftrace location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  * That is, the instruction that is either a NOP or call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  * the function tracer. It checks the ftrace internal tables to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * determine if the address belongs or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) unsigned long ftrace_location(unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	return ftrace_location_range(ip, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  * ftrace_text_reserved - return true if range contains an ftrace location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * @start: start of range to search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * @end: end of range to search (inclusive). @end points to the last byte to check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * Returns 1 if @start and @end contains a ftrace location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * That is, the instruction that is either a NOP or call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * the function tracer. It checks the ftrace internal tables to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * determine if the address belongs or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int ftrace_text_reserved(const void *start, const void *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	ret = ftrace_location_range((unsigned long)start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				    (unsigned long)end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	return (int)!!ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Test if ops registered to this rec needs regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	bool keep_regs = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	for (ops = ftrace_ops_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	     ops != &ftrace_list_end; ops = ops->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		/* pass rec in as regs to have non-NULL val */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		if (ftrace_ops_test(ops, rec->ip, rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 				keep_regs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	return  keep_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 				     int filter_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 				     bool inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	struct ftrace_hash *other_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	bool update = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	int all = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	/* Only update if the ops has been registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	 * In the filter_hash case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	 *   If the count is zero, we update all records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	 *   Otherwise we just update the items in the hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	 * In the notrace_hash case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	 *   We enable the update in the hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	 *   As disabling notrace means enabling the tracing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	 *   and enabling notrace means disabling, the inc variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	 *   gets inversed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	if (filter_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		other_hash = ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			all = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		inc = !inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		hash = ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		other_hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		 * If the notrace hash has no items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		 * then there's nothing to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		int in_other_hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		int in_hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		int match = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		if (all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			 * Only the filter_hash affects all records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			 * Update if the record is not in the notrace hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 				match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			 * If filter_hash is set, we want to match all functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			 * that are in the hash but not in the other hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			 * If filter_hash is not set, then we are decrementing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			 * That means we match anything that is in the hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			 * and also in the other_hash. That is, we need to turn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			 * off functions in the other hash because they are disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			 * by this hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			if (filter_hash && in_hash && !in_other_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			else if (!filter_hash && in_hash &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 				 (in_other_hash || ftrace_hash_empty(other_hash)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 				match = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		if (!match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		if (inc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			rec->flags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			if (ops->flags & FTRACE_OPS_FL_DIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 				rec->flags |= FTRACE_FL_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			 * If there's only a single callback registered to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			 * function, and the ops has a trampoline registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			 * for it, then we can call it directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 				rec->flags |= FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 				 * If we are adding another function callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				 * to this function, and the previous had a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 				 * custom trampoline in use, then we need to go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 				 * back to the default trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 				rec->flags &= ~FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 			 * If any ops wants regs saved for this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			 * then all ops will get saved regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 				rec->flags |= FTRACE_FL_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 				return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			rec->flags--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			 * Only the internal direct_ops should have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			 * DIRECT flag set. Thus, if it is removing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			 * function, then that function should no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			 * be direct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 			if (ops->flags & FTRACE_OPS_FL_DIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 				rec->flags &= ~FTRACE_FL_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			 * If the rec had REGS enabled and the ops that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			 * being removed had REGS set, then see if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			 * still any ops for this record that wants regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			 * If not, we can stop recording them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			if (ftrace_rec_count(rec) > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			    rec->flags & FTRACE_FL_REGS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 				if (!test_rec_ops_needs_regs(rec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 					rec->flags &= ~FTRACE_FL_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			 * The TRAMP needs to be set only if rec count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			 * is decremented to one, and the ops that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			 * left has a trampoline. As TRAMP can only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			 * enabled if there is only a single ops attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			 * to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			if (ftrace_rec_count(rec) == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			    ftrace_find_tramp_ops_any_other(rec, ops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 				rec->flags |= FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 				rec->flags &= ~FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			 * flags will be cleared in ftrace_check_record()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			 * if rec count is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		/* Shortcut, if we handled all records, we are done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		if (!all && count == hash->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			return update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	return update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 				    int filter_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	return __ftrace_hash_rec_update(ops, filter_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 				   int filter_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	return __ftrace_hash_rec_update(ops, filter_hash, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 					  int filter_hash, int inc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	__ftrace_hash_rec_update(ops, filter_hash, inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	if (ops->func_hash != &global_ops.local_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	 * If the ops shares the global_ops hash, then we need to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	 * all ops that are enabled and use this hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		/* Already done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		if (op == ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		if (op->func_hash == &global_ops.local_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			__ftrace_hash_rec_update(op, filter_hash, inc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 					   int filter_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 					  int filter_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)  * or no-needed to update, -EBUSY if it detects a conflict of the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)  * Note that old_hash and new_hash has below meanings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)  *  - If the hash is EMPTY_HASH, it hits nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)  *  - Anything else hits the recs which match the hash entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 					 struct ftrace_hash *old_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 					 struct ftrace_hash *new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	struct dyn_ftrace *rec, *end = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	int in_old, in_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	/* Only update if the ops has been registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 * Since the IPMODIFY is a very address sensitive action, we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	 * allow ftrace_ops to set all functions to new hash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (!new_hash || !old_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	/* Update rec->flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		/* We need to update only differences of filter_hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		if (in_old == in_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		if (in_new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 			/* New entries must ensure no others are using it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 			if (rec->flags & FTRACE_FL_IPMODIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				goto rollback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			rec->flags |= FTRACE_FL_IPMODIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		} else /* Removed entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			rec->flags &= ~FTRACE_FL_IPMODIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rollback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	end = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	/* Roll back what we did above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		if (rec == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		if (in_old == in_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		if (in_new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			rec->flags &= ~FTRACE_FL_IPMODIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			rec->flags |= FTRACE_FL_IPMODIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* Disabling always succeeds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 				       struct ftrace_hash *new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (ftrace_hash_empty(old_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		old_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (ftrace_hash_empty(new_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		new_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static void print_ip_ins(const char *fmt, const unsigned char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	char ins[MCOUNT_INSN_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	printk(KERN_CONT "%s", fmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) enum ftrace_bug_type ftrace_bug_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) const void *ftrace_expected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) static void print_bug_type(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	switch (ftrace_bug_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	case FTRACE_BUG_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	case FTRACE_BUG_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		pr_info("Initializing ftrace call sites\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	case FTRACE_BUG_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		pr_info("Setting ftrace call site to NOP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	case FTRACE_BUG_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		pr_info("Setting ftrace call site to call ftrace function\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	case FTRACE_BUG_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		pr_info("Updating ftrace call site to call a different ftrace function\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)  * ftrace_bug - report and shutdown function tracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)  * @failed: The failed type (EFAULT, EINVAL, EPERM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)  * @rec: The record that failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)  * The arch code that enables or disables the function tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)  * can call ftrace_bug() when it has detected a problem in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)  * modifying the code. @failed should be one of either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)  * EFAULT - if the problem happens on reading the @ip address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)  * EINVAL - if what is read at @ip is not what was expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)  * EPERM - if the problem happens on writing to the @ip address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) void ftrace_bug(int failed, struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	unsigned long ip = rec ? rec->ip : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	pr_info("------------[ ftrace bug ]------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	switch (failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	case -EFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		pr_info("ftrace faulted on modifying ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		print_ip_sym(KERN_INFO, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		pr_info("ftrace failed to modify ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		print_ip_sym(KERN_INFO, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		print_ip_ins(" actual:   ", (unsigned char *)ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		if (ftrace_expected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			print_ip_ins(" expected: ", ftrace_expected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		pr_info("ftrace faulted on writing ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		print_ip_sym(KERN_INFO, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		pr_info("ftrace faulted on unknown error ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		print_ip_sym(KERN_INFO, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	print_bug_type();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		struct ftrace_ops *ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		pr_info("ftrace record flags: %lx\n", rec->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			ops = ftrace_find_tramp_ops_any(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 			if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 				do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 					pr_cont("\ttramp: %pS (%pS)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 						(void *)ops->trampoline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 						(void *)ops->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 					ops = ftrace_find_tramp_ops_next(rec, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 				} while (ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 				pr_cont("\ttramp: ERROR!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		ip = ftrace_get_addr_curr(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		pr_cont("\n expected tramp: %lx\n", ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	FTRACE_WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	unsigned long flag = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		return FTRACE_UPDATE_IGNORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	 * If we are updating calls:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	 *   If the record has a ref count, then we need to enable it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	 *   because someone is using it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	 *   Otherwise we make sure its disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	 * If we are disabling calls, then disable all records that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	 * are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	if (enable && ftrace_rec_count(rec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		flag = FTRACE_FL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	 * If enabling and the REGS flag does not match the REGS_EN, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	 * this record. Set flags to fail the compare against ENABLED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	 * Same for direct calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		if (!(rec->flags & FTRACE_FL_REGS) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		    !(rec->flags & FTRACE_FL_REGS_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			flag |= FTRACE_FL_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			flag |= FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		 * Direct calls are special, as count matters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		 * We must test the record for direct, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		 * DIRECT and DIRECT_EN do not match, but only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		 * if the count is 1. That's because, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		 * count is something other than one, we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		 * want the direct enabled (it will be done via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		 * direct helper). But if DIRECT_EN is set, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		 * the count is not one, we need to clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		if (ftrace_rec_count(rec) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			if (!(rec->flags & FTRACE_FL_DIRECT) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			    !(rec->flags & FTRACE_FL_DIRECT_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 				flag |= FTRACE_FL_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 			flag |= FTRACE_FL_DIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	/* If the state of this record hasn't changed, then do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		return FTRACE_UPDATE_IGNORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		/* Save off if rec is being enabled (for return value) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		flag ^= rec->flags & FTRACE_FL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 			rec->flags |= FTRACE_FL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			if (flag & FTRACE_FL_REGS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 				if (rec->flags & FTRACE_FL_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 					rec->flags |= FTRACE_FL_REGS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 					rec->flags &= ~FTRACE_FL_REGS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 			if (flag & FTRACE_FL_TRAMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 				if (rec->flags & FTRACE_FL_TRAMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 					rec->flags |= FTRACE_FL_TRAMP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 			if (flag & FTRACE_FL_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 				 * If there's only one user (direct_ops helper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 				 * then we can call the direct function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				 * directly (no ftrace trampoline).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 				if (ftrace_rec_count(rec) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 					if (rec->flags & FTRACE_FL_DIRECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 						rec->flags |= FTRACE_FL_DIRECT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 					else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 						rec->flags &= ~FTRACE_FL_DIRECT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 					/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 					 * Can only call directly if there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 					 * only one callback to the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 					rec->flags &= ~FTRACE_FL_DIRECT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		 * If this record is being updated from a nop, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		 *   return UPDATE_MAKE_CALL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		 * Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		 *   from the save regs, to a non-save regs function or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		 *   vice versa, or from a trampoline call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		if (flag & FTRACE_FL_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			ftrace_bug_type = FTRACE_BUG_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			return FTRACE_UPDATE_MAKE_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		ftrace_bug_type = FTRACE_BUG_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		return FTRACE_UPDATE_MODIFY_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		/* If there's no more users, clear all flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		if (!ftrace_rec_count(rec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			rec->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 			 * Just disable the record, but keep the ops TRAMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 			 * and REGS states. The _EN flags must be disabled though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	ftrace_bug_type = FTRACE_BUG_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	return FTRACE_UPDATE_MAKE_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)  * ftrace_update_record, set a record that now is tracing or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  * @rec: the record to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)  * @enable: set to true if the record is tracing, false to force disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)  * The records that represent all functions that can be traced need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)  * to be updated when tracing has been enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	return ftrace_check_record(rec, enable, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)  * ftrace_test_record, check if the record has been enabled or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)  * @rec: the record to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  * @enable: set to true to check if enabled, false if it is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)  * The arch code may need to test if a record is already set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)  * tracing to determine how to modify the function code that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)  * represents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	return ftrace_check_record(rec, enable, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		if (!op->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		if (hash_contains_ip(ip, op->func_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		if (op == op_exclude || !op->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		if (hash_contains_ip(ip, op->func_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			   struct ftrace_ops *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	while_for_each_ftrace_op(op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		if (!op->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		if (hash_contains_ip(ip, op->func_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	 * Need to check removed ops first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	 * If they are being removed, and this rec has a tramp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	 * and this rec is in the ops list, then it would be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	 * one with the tramp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	if (removed_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		if (hash_contains_ip(ip, &removed_ops->old_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 			return removed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	 * Need to find the current trampoline for a rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	 * Now, a trampoline is only attached to a rec if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	 * was a single 'ops' attached to it. But this can be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	 * when we are adding another op to the rec or removing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	 * current one. Thus, if the op is being added, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	 * ignore it because it hasn't attached itself to the rec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	 * yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	 * If an ops is being modified (hooking to different functions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	 * then we don't care about the new functions that are being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * added, just the old ones (that are probably being removed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * If we are adding an ops to a function that already is using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 * a trampoline, it needs to be removed (trampolines are only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 * for single ops connected), then an ops that is not being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	 * modified also needs to be checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		if (!op->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		 * If the ops is being added, it hasn't gotten to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		 * the point to be removed from this tree yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 		if (op->flags & FTRACE_OPS_FL_ADDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		 * If the ops is being modified and is in the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		 * hash, then it is probably being removed from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		 * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		    hash_contains_ip(ip, &op->old_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		 * If the ops is not being added or modified, and it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		 * in its normal filter hash, then this must be the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		 * we want!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		    hash_contains_ip(ip, op->func_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) static struct ftrace_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 		/* pass rec in as regs to have non-NULL val */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		if (hash_contains_ip(ip, op->func_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 			return op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /* Protected by rcu_tasks for reading, and direct_mutex for writing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) static struct ftrace_hash *direct_functions = EMPTY_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) static DEFINE_MUTEX(direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) int ftrace_direct_func_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)  * Search the direct_functions hash to see if the given instruction pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)  * has a direct caller attached to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) unsigned long ftrace_find_rec_direct(unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	entry = __ftrace_lookup_ip(direct_functions, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	return entry->direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static void call_direct_funcs(unsigned long ip, unsigned long pip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 			      struct ftrace_ops *ops, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	addr = ftrace_find_rec_direct(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	arch_ftrace_set_direct_caller(regs, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct ftrace_ops direct_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	.func		= call_direct_funcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	.flags		= FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 			  | FTRACE_OPS_FL_PERMANENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	 * By declaring the main trampoline as this trampoline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	 * it will never have one allocated for it. Allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	 * trampolines should not call direct functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	 * The direct_ops should only be called by the builtin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	 * ftrace_regs_caller trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	.trampoline	= FTRACE_REGS_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)  * ftrace_get_addr_new - Get the call address to set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)  * @rec:  The ftrace record descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)  * If the record has the FTRACE_FL_REGS set, that means that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)  * is not set, then it wants to convert to the normal callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)  * Returns the address of the trampoline to set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	if ((rec->flags & FTRACE_FL_DIRECT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	    (ftrace_rec_count(rec) == 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		addr = ftrace_find_rec_direct(rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 			return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	/* Trampolines take precedence over regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	if (rec->flags & FTRACE_FL_TRAMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		ops = ftrace_find_tramp_ops_new(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 				(void *)rec->ip, (void *)rec->ip, rec->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 			/* Ftrace is shutting down, return anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			return (unsigned long)FTRACE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		return ops->trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	if (rec->flags & FTRACE_FL_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		return (unsigned long)FTRACE_REGS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		return (unsigned long)FTRACE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)  * ftrace_get_addr_curr - Get the call address that is already there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)  * @rec:  The ftrace record descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)  * The FTRACE_FL_REGS_EN is set when the record already points to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)  * a function that saves all the regs. Basically the '_EN' version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)  * represents the current state of the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)  * Returns the address of the trampoline that is currently being called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	/* Direct calls take precedence over trampolines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	if (rec->flags & FTRACE_FL_DIRECT_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		addr = ftrace_find_rec_direct(rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	/* Trampolines take precedence over regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		ops = ftrace_find_tramp_ops_curr(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		if (FTRACE_WARN_ON(!ops)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 				(void *)rec->ip, (void *)rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 			/* Ftrace is shutting down, return anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			return (unsigned long)FTRACE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		return ops->trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	if (rec->flags & FTRACE_FL_REGS_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		return (unsigned long)FTRACE_REGS_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		return (unsigned long)FTRACE_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	unsigned long ftrace_old_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	unsigned long ftrace_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	ftrace_addr = ftrace_get_addr_new(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	/* This needs to be done before we call ftrace_update_record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	ftrace_old_addr = ftrace_get_addr_curr(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	ret = ftrace_update_record(rec, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	case FTRACE_UPDATE_IGNORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	case FTRACE_UPDATE_MAKE_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		ftrace_bug_type = FTRACE_BUG_CALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		return ftrace_make_call(rec, ftrace_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	case FTRACE_UPDATE_MAKE_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		ftrace_bug_type = FTRACE_BUG_NOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	case FTRACE_UPDATE_MODIFY_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		ftrace_bug_type = FTRACE_BUG_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	return -1; /* unknown ftrace bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) void __weak ftrace_replace_code(int mod_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	int failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		failed = __ftrace_replace_code(rec, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		if (failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			ftrace_bug(failed, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			/* Stop processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		if (schedulable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct ftrace_rec_iter {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	struct ftrace_page	*pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	int			index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)  * ftrace_rec_iter_start, start up iterating over traced functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)  * Returns an iterator handle that is used to iterate over all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)  * the records that represent address locations where functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)  * are traced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)  * May return NULL if no records are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) struct ftrace_rec_iter *ftrace_rec_iter_start(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	 * We only use a single iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	 * Protected by the ftrace_lock mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	static struct ftrace_rec_iter ftrace_rec_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	iter->pg = ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	iter->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	/* Could have empty pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	while (iter->pg && !iter->pg->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		iter->pg = iter->pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	if (!iter->pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)  * ftrace_rec_iter_next, get the next record to process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)  * @iter: The handle to the iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)  * Returns the next iterator after the given iterator @iter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	iter->index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	if (iter->index >= iter->pg->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 		iter->pg = iter->pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 		iter->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		/* Could have empty pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		while (iter->pg && !iter->pg->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 			iter->pg = iter->pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	if (!iter->pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)  * ftrace_rec_iter_record, get the record at the iterator location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)  * @iter: The current iterator location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)  * Returns the record that the current @iter is at.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	return &iter->pg->records[iter->index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	ret = ftrace_init_nop(mod, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		ftrace_bug_type = FTRACE_BUG_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		ftrace_bug(ret, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)  * archs can override this function if they must do something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)  * before the modifying code is performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) int __weak ftrace_arch_code_modify_prepare(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)  * archs can override this function if they must do something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)  * after the modifying code is performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) int __weak ftrace_arch_code_modify_post_process(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) void ftrace_modify_all_code(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	int mod_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (command & FTRACE_MAY_SLEEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	 * If the ftrace_caller calls a ftrace_ops func directly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	 * we need to make sure that it only traces functions it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	 * expects to trace. When doing the switch of functions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	 * we need to update to the ftrace_ops_list_func first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	 * before the transition between old and new calls are set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	 * as the ftrace_ops_list_func will check the ops hashes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	 * to make sure the ops are having the right functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	 * traced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		if (FTRACE_WARN_ON(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	if (command & FTRACE_UPDATE_CALLS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	else if (command & FTRACE_DISABLE_CALLS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		ftrace_replace_code(mod_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 		function_trace_op = set_function_trace_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 		smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		/* If irqs are disabled, we are in stop machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		if (!irqs_disabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 			smp_call_function(ftrace_sync_ipi, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		err = ftrace_update_ftrace_func(ftrace_trace_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		if (FTRACE_WARN_ON(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	if (command & FTRACE_START_FUNC_RET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		err = ftrace_enable_ftrace_graph_caller();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	else if (command & FTRACE_STOP_FUNC_RET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		err = ftrace_disable_ftrace_graph_caller();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	FTRACE_WARN_ON(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) static int __ftrace_modify_code(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	int *command = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	ftrace_modify_all_code(*command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  * ftrace_run_stop_machine, go back to the stop machine method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  * @command: The command to tell ftrace what to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * If an arch needs to fall back to the stop machine method, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * it can call this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) void ftrace_run_stop_machine(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	stop_machine(__ftrace_modify_code, &command, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)  * arch_ftrace_update_code, modify the code to trace or not trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)  * @command: The command that needs to be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)  * Archs can override this function if it does not need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)  * run stop_machine() to modify code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) void __weak arch_ftrace_update_code(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	ftrace_run_stop_machine(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) static void ftrace_run_update_code(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	ret = ftrace_arch_code_modify_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	FTRACE_WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	 * By default we use stop_machine() to modify the code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	 * But archs can do what ever they want as long as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	 * is safe. The stop_machine() is the safest, but also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	 * produces the most overhead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	arch_ftrace_update_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	ret = ftrace_arch_code_modify_post_process();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	FTRACE_WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 				   struct ftrace_ops_hash *old_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	ops->old_hash.filter_hash = old_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	ftrace_run_update_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	ops->old_hash.filter_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	ops->old_hash.notrace_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) static ftrace_func_t saved_ftrace_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static int ftrace_start_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /* List of trace_ops that have allocated trampolines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) static LIST_HEAD(ftrace_ops_trampoline_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	lockdep_assert_held(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	lockdep_assert_held(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	list_del_rcu(&ops->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)  * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)  * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)  * not a module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) static void ftrace_trampoline_free(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	    ops->trampoline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		 * Record the text poke event before the ksymbol unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		 * event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 		perf_event_text_poke((void *)ops->trampoline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 				     (void *)ops->trampoline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 				     ops->trampoline_size, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 				   ops->trampoline, ops->trampoline_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 				   true, FTRACE_TRAMPOLINE_SYM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		/* Remove from kallsyms after the perf events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		ftrace_remove_trampoline_from_kallsyms(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	arch_ftrace_trampoline_free(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static void ftrace_startup_enable(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	if (saved_ftrace_func != ftrace_trace_function) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		saved_ftrace_func = ftrace_trace_function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 		command |= FTRACE_UPDATE_TRACE_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	if (!command || !ftrace_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	ftrace_run_update_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) static void ftrace_startup_all(int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	update_all_ops = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	ftrace_startup_enable(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	update_all_ops = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) int ftrace_startup(struct ftrace_ops *ops, int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	ret = __register_ftrace_function(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	ftrace_start_up++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	 * Note that ftrace probes uses this to start up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	 * and modify functions it will probe. But we still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	 * set the ADDING flag for modification, as probes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	 * do not have trampolines. If they add them in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	 * future, then the probes will need to distinguish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	 * between adding and updating probes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	ret = ftrace_hash_ipmodify_enable(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 		/* Rollback registration process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 		__unregister_ftrace_function(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		ftrace_start_up--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 			ftrace_trampoline_free(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	if (ftrace_hash_rec_enable(ops, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		command |= FTRACE_UPDATE_CALLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	ftrace_startup_enable(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) int ftrace_shutdown(struct ftrace_ops *ops, int command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	ret = __unregister_ftrace_function(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	ftrace_start_up--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	 * Just warn in case of unbalance, no need to kill ftrace, it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	 * critical but the ftrace_call callers may be never nopped again after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	 * further ftrace uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	WARN_ON_ONCE(ftrace_start_up < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	/* Disabling ipmodify never fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	ftrace_hash_ipmodify_disable(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	if (ftrace_hash_rec_disable(ops, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		command |= FTRACE_UPDATE_CALLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	if (saved_ftrace_func != ftrace_trace_function) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		saved_ftrace_func = ftrace_trace_function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 		command |= FTRACE_UPDATE_TRACE_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	if (!command || !ftrace_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		 * If these are dynamic or per_cpu ops, they still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		 * need their data freed. Since, function tracing is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		 * not currently active, we can just free them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		 * without synchronizing all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			goto free_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	 * If the ops uses a trampoline, then it needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	 * tested first on update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	ops->flags |= FTRACE_OPS_FL_REMOVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	removed_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	/* The trampoline logic checks the old hashes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	ftrace_run_update_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	 * If there's no more ops registered with ftrace, run a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	 * sanity check to make sure all rec flags are cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	if (rcu_dereference_protected(ftrace_ops_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 				pr_warn("  %pS flags:%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 					(void *)rec->ip, rec->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	ops->old_hash.filter_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	ops->old_hash.notrace_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	removed_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 	 * Dynamic ops may be freed, we must make sure that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	 * callers are done before leaving this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	 * The same goes for freeing the per_cpu data of the per_cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	 * ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		 * We need to do a hard force of sched synchronization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		 * This is because we use preempt_disable() to do RCU, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 		 * the function tracers can be called where RCU is not watching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 		 * (like before user_exit()). We can not rely on the RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		 * infrastructure to do the synchronization, thus we must do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 		 * ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		synchronize_rcu_tasks_rude();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		 * When the kernel is preemptive, tasks can be preempted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 		 * while on a ftrace trampoline. Just scheduling a task on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 		 * a CPU is not good enough to flush them. Calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		 * synchornize_rcu_tasks() will wait for those tasks to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 		 * execute and either schedule voluntarily or enter user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 		if (IS_ENABLED(CONFIG_PREEMPTION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 			synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)  free_ops:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 		ftrace_trampoline_free(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static void ftrace_startup_sysctl(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	int command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	/* Force update next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	saved_ftrace_func = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	/* ftrace_start_up is true if we want ftrace running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	if (ftrace_start_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		command = FTRACE_UPDATE_CALLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 		if (ftrace_graph_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 			command |= FTRACE_START_FUNC_RET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 		ftrace_startup_enable(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) static void ftrace_shutdown_sysctl(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	int command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	/* ftrace_start_up is true if ftrace is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	if (ftrace_start_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		command = FTRACE_DISABLE_CALLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		if (ftrace_graph_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 			command |= FTRACE_STOP_FUNC_RET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		ftrace_run_update_code(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) static u64		ftrace_update_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) unsigned long		ftrace_update_tot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) unsigned long		ftrace_number_of_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) unsigned long		ftrace_number_of_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) static inline int ops_traces_mod(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 	 * Filter_hash being empty will default to trace module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	 * But notrace hash requires a test of individual module functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 		ftrace_hash_empty(ops->func_hash->notrace_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)  * Check if the current ops references the record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)  * If the ops traces all functions, then it was already accounted for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)  * If the ops does not trace the current record function, skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)  * If the ops ignores the function via notrace filter, skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 	/* If ops isn't enabled, ignore it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	/* If ops traces all then it includes this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	if (ops_traces_mod(ops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	/* The function must be in the filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	/* If in notrace hash, we ignore it too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	struct dyn_ftrace *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 	u64 start, stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 	unsigned long update_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 	unsigned long rec_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	start = ftrace_now(raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	 * When a module is loaded, this function is called to convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	 * the calls to mcount in its text to nops, and also to create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	 * an entry in the ftrace data. Now, if ftrace is activated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	 * after this call, but before the module sets its text to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	 * read-only, the modification of enabling ftrace can fail if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 	 * the read-only is done while ftrace is converting the calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	 * To prevent this, the module's records are set as disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	 * and will be enabled after the call to set the module's text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 	 * to read-only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	if (mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		rec_flags |= FTRACE_FL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	for (pg = new_pgs; pg; pg = pg->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 		for (i = 0; i < pg->index; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 			/* If something went wrong, bail without enabling anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 			if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 			p = &pg->records[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 			p->flags = rec_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 			 * Do the initial record conversion from mcount jump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 			 * to the NOP instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 			if (!__is_defined(CC_USING_NOP_MCOUNT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 			    !ftrace_nop_initialize(mod, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 			update_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	stop = ftrace_now(raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	ftrace_update_time = stop - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	ftrace_update_tot_cnt += update_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) static int ftrace_allocate_records(struct ftrace_page *pg, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	if (WARN_ON(!count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	order = get_count_order(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	 * We want to fill as much as possible. No more than a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	 * may be empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 	if (!is_power_of_2(pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 		order--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)  again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	if (!pg->records) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 		/* if we can't allocate this size, try something smaller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		if (!order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		order >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	ftrace_number_of_pages += 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	ftrace_number_of_groups++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	pg->size = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	if (cnt > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		cnt = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) static struct ftrace_page *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) ftrace_allocate_pages(unsigned long num_to_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	struct ftrace_page *start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 	if (!num_to_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	if (!pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	 * Try to allocate as much as possible in one continues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	 * location that fills in all of the space. We want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	 * waste as little space as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 		cnt = ftrace_allocate_records(pg, num_to_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		if (cnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 			goto free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		num_to_init -= cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 		if (!num_to_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		if (!pg->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 			goto free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	return start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)  free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 	pg = start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	while (pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		if (order >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 			free_pages((unsigned long)pg->records, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		start_pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 		kfree(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		pg = start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		ftrace_number_of_pages -= 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 		ftrace_number_of_groups--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	pr_info("ftrace: FAILED to allocate memory for functions\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) struct ftrace_iterator {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	loff_t				pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	loff_t				func_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	loff_t				mod_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	struct ftrace_page		*pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	struct dyn_ftrace		*func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	struct ftrace_func_probe	*probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	struct ftrace_func_entry	*probe_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	struct trace_parser		parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 	struct ftrace_hash		*hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	struct ftrace_ops		*ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	struct trace_array		*tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 	struct list_head		*mod_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	int				pidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 	int				idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	unsigned			flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) t_probe_next(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	struct trace_array *tr = iter->ops->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	struct list_head *func_probes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	struct list_head *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	struct hlist_node *hnd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 	iter->pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	if (!tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	func_probes = &tr->func_probes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	if (list_empty(func_probes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	if (!iter->probe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		next = func_probes->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	if (iter->probe_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 		hnd = &iter->probe_entry->hlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	hash = iter->probe->ops.func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	 * A probe being registered may temporarily have an empty hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	 * and it's at the end of the func_probes list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	if (!hash || hash == EMPTY_HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)  retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	if (iter->pidx >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		if (iter->probe->list.next == func_probes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		next = iter->probe->list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		hash = iter->probe->ops.func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 		iter->pidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	hhd = &hash->buckets[iter->pidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	if (hlist_empty(hhd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		iter->pidx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		hnd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	if (!hnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 		hnd = hhd->first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		hnd = hnd->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		if (!hnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 			iter->pidx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 	if (WARN_ON_ONCE(!hnd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) static void *t_probe_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	void *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	loff_t l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	if (iter->mod_pos > *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	iter->probe = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	iter->probe_entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	iter->pidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	for (l = 0; l <= (*pos - iter->mod_pos); ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		p = t_probe_next(m, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 	/* Only set this if we have an item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	iter->flags |= FTRACE_ITER_PROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	struct ftrace_func_entry *probe_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	struct ftrace_probe_ops *probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	struct ftrace_func_probe *probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	probe = iter->probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	probe_entry = iter->probe_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	if (WARN_ON_ONCE(!probe || !probe_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	probe_ops = probe->probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	if (probe_ops->print)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		   (void *)probe_ops->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) t_mod_next(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	iter->pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	iter->mod_list = iter->mod_list->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	if (iter->mod_list == &tr->mod_trace ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	    iter->mod_list == &tr->mod_notrace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 		iter->flags &= ~FTRACE_ITER_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	iter->mod_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) static void *t_mod_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	void *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	loff_t l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	if (iter->func_pos > *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	iter->mod_pos = iter->func_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	/* probes are only available if tr is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	if (!iter->tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	for (l = 0; l <= (*pos - iter->func_pos); ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		p = t_mod_next(m, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		iter->flags &= ~FTRACE_ITER_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		return t_probe_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	/* Only set this if we have an item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	iter->flags |= FTRACE_ITER_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	struct ftrace_mod_load *ftrace_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	struct trace_array *tr = iter->tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	if (WARN_ON_ONCE(!iter->mod_list) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 			 iter->mod_list == &tr->mod_trace ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 			 iter->mod_list == &tr->mod_notrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	if (ftrace_mod->func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		seq_printf(m, "%s", ftrace_mod->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		seq_putc(m, '*');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) t_func_next(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	struct dyn_ftrace *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)  retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	if (iter->idx >= iter->pg->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		if (iter->pg->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 			iter->pg = iter->pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 			iter->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		rec = &iter->pg->records[iter->idx++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		     !(rec->flags & FTRACE_FL_ENABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 			rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 			goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	iter->pos = iter->func_pos = *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	iter->func = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) t_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	loff_t l = *pos; /* t_probe_start() must use original pos */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 	void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	if (iter->flags & FTRACE_ITER_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 		return t_probe_next(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	if (iter->flags & FTRACE_ITER_MOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 		return t_mod_next(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	if (iter->flags & FTRACE_ITER_PRINTALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		/* next must increment pos, and t_probe_start does not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 		return t_mod_start(m, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	ret = t_func_next(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 		return t_mod_start(m, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) static void reset_iter_read(struct ftrace_iterator *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	iter->pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	iter->func_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) static void *t_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	void *p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	loff_t l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	 * If an lseek was done, then reset and start from beginning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	if (*pos < iter->pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 		reset_iter_read(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	 * For set_ftrace_filter reading, if we have the filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	 * off, we can short cut and just print out that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	 * functions are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	    ftrace_hash_empty(iter->hash)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		iter->func_pos = 1; /* Account for the message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 		if (*pos > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 			return t_mod_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 		iter->flags |= FTRACE_ITER_PRINTALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 		/* reset in case of seek/pread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		iter->flags &= ~FTRACE_ITER_PROBE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 		return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	if (iter->flags & FTRACE_ITER_MOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 		return t_mod_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	 * Unfortunately, we need to restart at ftrace_pages_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	 * every time we let go of the ftrace_mutex. This is because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	 * those pointers can change without the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	iter->pg = ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	iter->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	for (l = 0; l <= *pos; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 		p = t_func_next(m, &l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 		if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 		return t_mod_start(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) static void t_stop(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) void * __weak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 				struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	ptr = arch_ftrace_trampoline_func(ops, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 	if (ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 		seq_printf(m, " ->%pS", ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) static int t_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	struct ftrace_iterator *iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	if (iter->flags & FTRACE_ITER_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 		return t_probe_show(m, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	if (iter->flags & FTRACE_ITER_MOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 		return t_mod_show(m, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	if (iter->flags & FTRACE_ITER_PRINTALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		if (iter->flags & FTRACE_ITER_NOTRACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 			seq_puts(m, "#### no functions disabled ####\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 			seq_puts(m, "#### all functions enabled ####\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 	rec = iter->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	seq_printf(m, "%ps", (void *)rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	if (iter->flags & FTRACE_ITER_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 		struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 		seq_printf(m, " (%ld)%s%s%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 			   ftrace_rec_count(rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 			ops = ftrace_find_tramp_ops_any(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 				do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 					seq_printf(m, "\ttramp: %pS (%pS)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 						   (void *)ops->trampoline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 						   (void *)ops->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 					add_trampoline_func(m, ops, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 					ops = ftrace_find_tramp_ops_next(rec, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 				} while (ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 				seq_puts(m, "\ttramp: ERROR!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 			add_trampoline_func(m, NULL, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 		if (rec->flags & FTRACE_FL_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 			unsigned long direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 			direct = ftrace_find_rec_direct(rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 			if (direct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) static const struct seq_operations show_ftrace_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	.start = t_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	.next = t_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	.stop = t_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	.show = t_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) ftrace_avail_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 	struct ftrace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	ret = security_locked_down(LOCKDOWN_TRACEFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	if (!iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	iter->pg = ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	iter->ops = &global_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) ftrace_enabled_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 	struct ftrace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	 * This shows us what functions are currently being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 	 * traced and by what. Not sure if we want lockdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	 * to hide such critical information for an admin.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	 * Although, perhaps it can show information we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 	 * want people to see, but if something is tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	 * something, we probably want to know about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	if (!iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	iter->pg = ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	iter->flags = FTRACE_ITER_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	iter->ops = &global_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)  * ftrace_regex_open - initialize function tracer filter files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747)  * @ops: The ftrace_ops that hold the hash filters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)  * @flag: The type of filter to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749)  * @inode: The inode, usually passed in to your open routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)  * @file: The file, usually passed in to your open routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)  * ftrace_regex_open() initializes the filter files for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)  * @ops. Depending on @flag it may process the filter hash or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)  * the notrace hash of @ops. With this called from the open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)  * routine, you can use ftrace_filter_write() for the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)  * routine if @flag has FTRACE_ITER_FILTER set, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)  * tracing_lseek() should be used as the lseek routine, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)  * release must call ftrace_regex_release().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) ftrace_regex_open(struct ftrace_ops *ops, int flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 		  struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 	struct ftrace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 	struct list_head *mod_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 	struct trace_array *tr = ops->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	if (tracing_check_open_get_tr(tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	if (!iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	iter->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 	iter->flags = flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	iter->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 	mutex_lock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	if (flag & FTRACE_ITER_NOTRACE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 		hash = ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 		mod_head = tr ? &tr->mod_notrace : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 		hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		mod_head = tr ? &tr->mod_trace : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	iter->mod_list = mod_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	if (file->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 		if (file->f_flags & O_TRUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 			iter->hash = alloc_ftrace_hash(size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 			clear_ftrace_mod_list(mod_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 	        } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		if (!iter->hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 			trace_parser_put(&iter->parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 		iter->hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		iter->pg = ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 		ret = seq_open(file, &show_ftrace_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 			struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 			m->private = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 			/* Failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 			free_ftrace_hash(iter->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 			trace_parser_put(&iter->parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 		file->private_data = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	mutex_unlock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 		kfree(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		if (tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 			trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) ftrace_filter_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 	struct ftrace_ops *ops = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 	/* Checks for tracefs lockdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	return ftrace_regex_open(ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 			inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) ftrace_notrace_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 	struct ftrace_ops *ops = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 	/* Checks for tracefs lockdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 				 inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) struct ftrace_glob {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	char *search;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	unsigned len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)  * If symbols in an architecture don't correspond exactly to the user-visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)  * name of what they represent, it is possible to define this function to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)  * perform the necessary adjustments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) char * __weak arch_ftrace_match_adjust(char *str, const char *search)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) static int ftrace_match(char *str, struct ftrace_glob *g)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	int matched = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 	int slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 	str = arch_ftrace_match_adjust(str, g->search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	switch (g->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	case MATCH_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		if (strcmp(str, g->search) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 			matched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 	case MATCH_FRONT_ONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		if (strncmp(str, g->search, g->len) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 			matched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	case MATCH_MIDDLE_ONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 		if (strstr(str, g->search))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 			matched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 	case MATCH_END_ONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) 		slen = strlen(str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 		if (slen >= g->len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 			matched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	case MATCH_GLOB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 		if (glob_match(g->search, str))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 			matched = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	return matched;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 	entry = ftrace_lookup_ip(hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	if (clear_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 		/* Do nothing if it doesn't exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		free_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 		/* Do nothing if it exists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 		if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 		ret = add_hash_entry(hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		 int clear_filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	long index = simple_strtoul(func_g->search, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	/* The index starts at 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 	if (--index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 		if (pg->index <= index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 			index -= pg->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 			/* this is a double loop, break goes to the next page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 		rec = &pg->records[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 		enter_record(hash, rec, clear_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 		struct ftrace_glob *mod_g, int exclude_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	char str[KSYM_SYMBOL_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	char *modname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	if (mod_g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 		/* blank module name to match all modules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 		if (!mod_g->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) 			/* blank module globbing: modname xor exclude_mod */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 			if (!exclude_mod != !modname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) 				goto func_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 		 * exclude_mod is set to trace everything but the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 		 * module. If it is set and the module matches, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 		 * return 0. If it is not set, and the module doesn't match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 		 * also return 0. Otherwise, check the function to see if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 		 * that matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 		if (!mod_matches == !exclude_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) func_match:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 		/* blank search means to match all funcs in the mod */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		if (!func_g->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	return ftrace_match(str, func_g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 	struct ftrace_glob func_g = { .type = MATCH_FULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 	struct ftrace_glob mod_g = { .type = MATCH_FULL };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	int exclude_mod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 	int clear_filter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	if (func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 		func_g.type = filter_parse_regex(func, len, &func_g.search,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 						 &clear_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 		func_g.len = strlen(func_g.search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	if (mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 		mod_g.type = filter_parse_regex(mod, strlen(mod),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 				&mod_g.search, &exclude_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		mod_g.len = strlen(mod_g.search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	if (func_g.type == MATCH_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 		found = add_rec_by_index(hash, &func_g, clear_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 			ret = enter_record(hash, rec, clear_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 				found = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 			found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	return match_records(hash, buff, len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) static void ftrace_ops_update_code(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 				   struct ftrace_ops_hash *old_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 	if (!ftrace_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 	 * If this is the shared global_ops filter, then we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 	 * check if there is another ops that shares it, is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 	 * If so, we still need to run the modify code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	if (ops->func_hash != &global_ops.local_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 		if (op->func_hash == &global_ops.local_hash &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 		    op->flags & FTRACE_OPS_FL_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 			/* Only need to do this once */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 					   struct ftrace_hash **orig_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 					   struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 					   int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 	struct ftrace_ops_hash old_hash_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	struct ftrace_hash *old_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	old_hash = *orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 		ftrace_ops_update_code(ops, &old_hash_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 		free_ftrace_hash_rcu(old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) static bool module_exists(const char *module)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 	/* All modules have the symbol __this_module */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	static const char this_mod[] = "__this_module";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 	unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 	if (n > sizeof(modname) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 	val = module_kallsyms_lookup_name(modname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 	return val != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) static int cache_mod(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 		     const char *func, char *module, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 	struct ftrace_mod_load *ftrace_mod, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	/* We do not cache inverse filters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 	if (func[0] == '!') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 		func++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 		/* Look to remove this hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 		list_for_each_entry_safe(ftrace_mod, n, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 			if (strcmp(ftrace_mod->module, module) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 			/* no func matches all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 			if (strcmp(func, "*") == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 			    (ftrace_mod->func &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 			     strcmp(ftrace_mod->func, func) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 				free_ftrace_mod(ftrace_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	/* We only care about modules that have not been loaded yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	if (module_exists(module))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	/* Save this string off, and execute it when the module is loaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 	ret = ftrace_add_mod(tr, func, module, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 		 int reset, int enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 			     char *mod, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	struct ftrace_mod_load *ftrace_mod, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	struct ftrace_hash **orig_hash, *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 	LIST_HEAD(process_mods);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 	char *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	mutex_lock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 		orig_hash = &ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 		orig_hash = &ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 					      *orig_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 	if (!new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		goto out; /* warn? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 	list_for_each_entry_safe(ftrace_mod, n, head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 		if (strcmp(ftrace_mod->module, mod) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 		if (ftrace_mod->func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 			func = kstrdup("*", GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 		if (!func) /* warn? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 		list_del(&ftrace_mod->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 		list_add(&ftrace_mod->list, &process_mods);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		/* Use the newly allocated func, as it may be "*" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		kfree(ftrace_mod->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 		ftrace_mod->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 		func = ftrace_mod->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 		/* Grabs ftrace_lock, which is why we have this extra step */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 		match_records(new_hash, func, strlen(func), mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 		free_ftrace_mod(ftrace_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	if (enable && list_empty(head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 					      new_hash, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 	mutex_unlock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 	free_ftrace_hash(new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) static void process_cached_mods(const char *mod_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	char *mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	mod = kstrdup(mod_name, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 	if (!mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 		if (!list_empty(&tr->mod_trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) 			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 		if (!list_empty(&tr->mod_notrace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) 			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 	kfree(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)  * We register the module command as a template to show others how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)  * to register the a command as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 		    char *func_orig, char *cmd, char *module, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	char *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 	/* match_records() modifies func, and we need the original */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 	func = kstrdup(func_orig, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 	if (!func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	 * cmd == 'mod' because we only registered this func
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 	 * for the 'mod' ftrace_func_command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	 * But if you register one func with multiple commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	 * you can tell which command was used by the cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 	 * parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 	ret = match_records(hash, func, strlen(func), module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 	kfree(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 		return cache_mod(tr, func_orig, module, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) static struct ftrace_func_command ftrace_mod_cmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	.name			= "mod",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	.func			= ftrace_mod_callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) static int __init ftrace_mod_cmd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	return register_ftrace_command(&ftrace_mod_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) core_initcall(ftrace_mod_cmd_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	struct ftrace_probe_ops *probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	struct ftrace_func_probe *probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 	probe = container_of(op, struct ftrace_func_probe, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	probe_ops = probe->probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 	 * Disable preemption for these calls to prevent a RCU grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	 * period. This syncs the hash iteration and freeing of items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	 * on the hash. rcu_read_lock is too dangerous here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) struct ftrace_func_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	struct ftrace_func_entry	entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 	void				*data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) struct ftrace_func_mapper {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	struct ftrace_hash		hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352)  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 	 * The mapper is simply a ftrace_hash, but since the entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 	 * in the hash are not ftrace_func_entry type, we define it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 	 * as a separate structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	return (struct ftrace_func_mapper *)hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371)  * @mapper: The mapper that has the ip maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)  * @ip: the instruction pointer to find the data for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)  * Returns the data mapped to @ip if found otherwise NULL. The return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)  * is actually the address of the mapper data pointer. The address is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)  * returned for use cases where the data is no bigger than a long, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)  * the user can use the data pointer as its data instead of having to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)  * allocate more memory for the reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 				  unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 	struct ftrace_func_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 	entry = ftrace_lookup_ip(&mapper->hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 	map = (struct ftrace_func_map *)entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 	return &map->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395)  * ftrace_func_mapper_add_ip - Map some data to an ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)  * @mapper: The mapper that has the ip maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)  * @ip: The instruction pointer address to map @data to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398)  * @data: The data to map to @ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)  * Returns 0 on success otherwise an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 			      unsigned long ip, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 	struct ftrace_func_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 	entry = ftrace_lookup_ip(&mapper->hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	map = kmalloc(sizeof(*map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) 	if (!map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) 	map->entry.ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) 	map->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 	__add_hash_entry(&mapper->hash, &map->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)  * @mapper: The mapper that has the ip maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)  * @ip: The instruction pointer address to remove the data from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)  * Returns the data if it is found, otherwise NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)  * Note, if the data pointer is used as the data itself, (see 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)  * ftrace_func_mapper_find_ip(), then the return value may be meaningless,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432)  * if the data pointer was set to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) 				   unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) 	struct ftrace_func_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	entry = ftrace_lookup_ip(&mapper->hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	map = (struct ftrace_func_map *)entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 	data = map->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 	remove_hash_entry(&mapper->hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 	kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 	return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)  * free_ftrace_func_mapper - free a mapping of ips and data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)  * @mapper: The mapper that has the ip maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457)  * @free_func: A function to be called on each data item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)  * This is used to free the function mapper. The @free_func is optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)  * and can be used if the data needs to be freed as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 			     ftrace_mapper_func free_func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 	struct ftrace_func_map *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 	struct hlist_head *hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 	int size, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 	if (!mapper)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) 	if (free_func && mapper->hash.count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 		size = 1 << mapper->hash.size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) 		for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) 			hhd = &mapper->hash.buckets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) 			hlist_for_each_entry(entry, hhd, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 				map = (struct ftrace_func_map *)entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 				free_func(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 	free_ftrace_hash(&mapper->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) static void release_probe(struct ftrace_func_probe *probe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 	struct ftrace_probe_ops *probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	WARN_ON(probe->ref <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 	/* Subtract the ref that was used to protect this instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	probe->ref--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 	if (!probe->ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 		probe_ops = probe->probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) 		 * Sending zero as ip tells probe_ops to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 		 * the probe->data itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) 		if (probe_ops->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) 			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 		list_del(&probe->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 		kfree(probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) static void acquire_probe_locked(struct ftrace_func_probe *probe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) 	 * Add one ref to keep it from being freed when releasing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) 	 * ftrace_lock mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) 	probe->ref++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) register_ftrace_function_probe(char *glob, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) 			       struct ftrace_probe_ops *probe_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) 			       void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) 	struct ftrace_func_probe *probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 	struct ftrace_hash **orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) 	struct ftrace_hash *old_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 	if (WARN_ON(!tr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	/* We do not support '!' for function probes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 	if (WARN_ON(glob[0] == '!'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) 	/* Check if the probe_ops is already registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 	list_for_each_entry(probe, &tr->func_probes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) 		if (probe->probe_ops == probe_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) 	if (&probe->list == &tr->func_probes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 		if (!probe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 			mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 		probe->probe_ops = probe_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 		probe->ops.func = function_trace_probe_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 		probe->tr = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 		ftrace_ops_init(&probe->ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 		list_add(&probe->list, &tr->func_probes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 	acquire_probe_locked(probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 	 * Note, there's a small window here that the func_hash->filter_hash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 	 * may be NULL or empty. Need to be careful when reading the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 	mutex_lock(&probe->ops.func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 	orig_hash = &probe->ops.func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 	old_hash = *orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 	if (!hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 	ret = ftrace_match_records(hash, glob, strlen(glob));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 	/* Nothing found? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 			if (ftrace_lookup_ip(old_hash, entry->ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 			 * The caller might want to do something special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 			 * for each function we find. We call the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 			 * to give the caller an opportunity to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 			if (probe_ops->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 				ret = probe_ops->init(probe_ops, tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 						      entry->ip, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 						      &probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 				if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 					if (probe_ops->free && count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 						probe_ops->free(probe_ops, tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 								0, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 					probe->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 	if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 		/* Nothing was added? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 					      hash, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 		goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	/* One ref for each new function traced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 	probe->ref += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 		ret = ftrace_startup(&probe->ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 		ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 	mutex_unlock(&probe->ops.func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 	free_ftrace_hash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 	release_probe(probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648)  err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	if (!probe_ops->free || !count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	/* Failed to do the move, need to call the free functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 			if (ftrace_lookup_ip(old_hash, entry->ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 	goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 				      struct ftrace_probe_ops *probe_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 	struct ftrace_ops_hash old_hash_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 	struct ftrace_func_probe *probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 	struct ftrace_glob func_g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 	struct ftrace_hash **orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	struct ftrace_hash *old_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 	struct ftrace_hash *hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 	struct hlist_node *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	struct hlist_head hhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) 	char str[KSYM_SYMBOL_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) 	int i, ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) 	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) 		func_g.search = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 		int not;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 		func_g.type = filter_parse_regex(glob, strlen(glob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 						 &func_g.search, &not);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 		func_g.len = strlen(func_g.search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		/* we do not support '!' for function probes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 		if (WARN_ON(not))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 	/* Check if the probe_ops is already registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 	list_for_each_entry(probe, &tr->func_probes, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 		if (probe->probe_ops == probe_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 	if (&probe->list == &tr->func_probes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 		goto err_unlock_ftrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 		goto err_unlock_ftrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	acquire_probe_locked(probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 	mutex_lock(&probe->ops.func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 	orig_hash = &probe->ops.func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 	old_hash = *orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 	if (ftrace_hash_empty(old_hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 	old_hash_ops.filter_hash = old_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) 	/* Probes only have filters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) 	old_hash_ops.notrace_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) 	if (!hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 	INIT_HLIST_HEAD(&hhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 	size = 1 << hash->size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 			if (func_g.search) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 				kallsyms_lookup(entry->ip, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 						NULL, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 				if (!ftrace_match(str, &func_g))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 			remove_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 			hlist_add_head(&entry->hlist, &hhd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 	/* Nothing found? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 	if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) 	WARN_ON(probe->ref < count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) 	probe->ref -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 		ftrace_shutdown(&probe->ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) 					      hash, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	/* still need to update the function call sites */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 	if (ftrace_enabled && !ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 				       &old_hash_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 		hlist_del(&entry->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 		if (probe_ops->free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 		kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 	mutex_unlock(&probe->ops.func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 	free_ftrace_hash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 	release_probe(probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)  err_unlock_ftrace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) void clear_ftrace_function_probes(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	struct ftrace_func_probe *probe, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) 		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) static LIST_HEAD(ftrace_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) static DEFINE_MUTEX(ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)  * Currently we only register ftrace commands from __init, so mark this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805)  * __init too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) __init int register_ftrace_command(struct ftrace_func_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	struct ftrace_func_command *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) 	mutex_lock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) 	list_for_each_entry(p, &ftrace_commands, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 		if (strcmp(cmd->name, p->name) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 	list_add(&cmd->list, &ftrace_commands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 	mutex_unlock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827)  * Currently we only unregister ftrace commands from __init, so mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828)  * this __init too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 	struct ftrace_func_command *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 	mutex_lock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 		if (strcmp(cmd->name, p->name) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 			list_del_init(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 	mutex_unlock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) static int ftrace_process_regex(struct ftrace_iterator *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) 				char *buff, int len, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 	struct ftrace_hash *hash = iter->hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 	struct trace_array *tr = iter->ops->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 	char *func, *command, *next = buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 	struct ftrace_func_command *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 	func = strsep(&next, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 	if (!next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 		ret = ftrace_match_records(hash, func, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 		if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 	/* command found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 	command = strsep(&next, ":");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 	mutex_lock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) 	list_for_each_entry(p, &ftrace_commands, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) 		if (strcmp(p->name, command) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) 			ret = p->func(tr, hash, func, command, next, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) 	mutex_unlock(&ftrace_cmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) ftrace_regex_write(struct file *file, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 		   size_t cnt, loff_t *ppos, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 	struct ftrace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	struct trace_parser *parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 	ssize_t ret, read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 	if (!cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 		struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 		iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 		iter = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 	/* iter->hash is a local copy, so we don't need regex_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 	parser = &iter->parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 	read = trace_get_user(parser, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 	if (read >= 0 && trace_parser_loaded(parser) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 	    !trace_parser_cont(parser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 		ret = ftrace_process_regex(iter, parser->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) 					   parser->idx, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 		trace_parser_clear(parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) 	ret = read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) ftrace_filter_write(struct file *file, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 		    size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) ftrace_notrace_write(struct file *file, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 		     size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 	if (!ftrace_location(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 	if (remove) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 		entry = ftrace_lookup_ip(hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 		if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 		free_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) 	return add_hash_entry(hash, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) 		unsigned long ip, int remove, int reset, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) 	struct ftrace_hash **orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 	mutex_lock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 		orig_hash = &ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) 		orig_hash = &ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 	if (reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 	if (!hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 		goto out_regex_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 	if (buf && !ftrace_match_records(hash, buf, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 		goto out_regex_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 	if (ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 		ret = ftrace_match_addr(hash, ip, remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 			goto out_regex_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000)  out_regex_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 	mutex_unlock(&ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 	free_ftrace_hash(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 		int reset, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 	return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) struct ftrace_direct_func {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 	struct list_head	next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 	unsigned long		addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 	int			count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) static LIST_HEAD(ftrace_direct_funcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025)  * ftrace_find_direct_func - test an address if it is a registered direct caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026)  * @addr: The address of a registered direct caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028)  * This searches to see if a ftrace direct caller has been registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029)  * at a specific address, and if so, it returns a descriptor for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031)  * This can be used by architecture code to see if an address is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)  * a direct caller (trampoline) attached to a fentry/mcount location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033)  * This is useful for the function_graph tracer, as it may need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034)  * do adjustments if it traced a location that also has a direct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)  * trampoline attached to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 	struct ftrace_direct_func *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 	bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 	/* May be called by fgraph trampoline (protected by rcu tasks) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 	list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 		if (entry->addr == addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 			found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 	if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 		return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 	struct ftrace_direct_func *direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 	if (!direct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 	direct->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 	direct->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 	list_add_rcu(&direct->next, &ftrace_direct_funcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 	ftrace_direct_func_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) 	return direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)  * register_ftrace_direct - Call a custom trampoline directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071)  * @ip: The address of the nop at the beginning of a function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072)  * @addr: The address of the trampoline to call at @ip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074)  * This is used to connect a direct call from the nop location (@ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075)  * at the start of ftrace traced functions. The location that it calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076)  * (@addr) must be able to handle a direct call, and save the parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077)  * of the function being traced, and restore them (or inject new ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)  * if needed), before returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081)  *  0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082)  *  -EBUSY - Another direct function is already attached (there can be only one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)  *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084)  *  -ENOMEM - There was an allocation failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) int register_ftrace_direct(unsigned long ip, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) 	struct ftrace_direct_func *direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 	struct ftrace_hash *free_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 	int ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 	mutex_lock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 	/* See if there's a direct function at @ip already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 	if (ftrace_find_rec_direct(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 	ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 	rec = lookup_rec(ip, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 	if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) 	 * Check if the rec says it has a direct call but we didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) 	 * find one earlier?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 	if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 	/* Make sure the ip points to the exact record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 	if (ip != rec->ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 		ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 		/* Need to check this ip for a direct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 		if (ftrace_find_rec_direct(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 	ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 	if (ftrace_hash_empty(direct_functions) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 		struct ftrace_hash *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 		int size = ftrace_hash_empty(direct_functions) ? 0 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 			direct_functions->count + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 		if (size < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 			size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 		new_hash = dup_hash(direct_functions, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 		if (!new_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) 		free_hash = direct_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 		direct_functions = new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 	direct = ftrace_find_direct_func(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 	if (!direct) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 		direct = ftrace_alloc_direct_func(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 		if (!direct) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 			kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) 	entry->ip = ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 	entry->direct = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 	__add_hash_entry(direct_functions, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 	ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 		remove_hash_entry(direct_functions, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 	if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 		ret = register_ftrace_function(&direct_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 			ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) 		kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) 		if (!direct->count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) 			list_del_rcu(&direct->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) 			synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) 			kfree(direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) 			if (free_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) 				free_ftrace_hash(free_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 			free_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 			ftrace_direct_func_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 		direct->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) 	mutex_unlock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) 	if (free_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) 		synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) 		free_ftrace_hash(free_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) EXPORT_SYMBOL_GPL(register_ftrace_direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 						   struct dyn_ftrace **recp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 	rec = lookup_rec(*ip, *ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 	if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) 	entry = __ftrace_lookup_ip(direct_functions, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 	if (!entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) 		WARN_ON(rec->flags & FTRACE_FL_DIRECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) 	WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 	/* Passed in ip just needs to be on the call site */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) 	*ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 	if (recp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 		*recp = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 	struct ftrace_direct_func *direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 	mutex_lock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 	entry = find_direct_entry(&ip, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 	if (direct_functions->count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 		unregister_ftrace_function(&direct_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 	WARN_ON(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 	remove_hash_entry(direct_functions, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 	direct = ftrace_find_direct_func(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 	if (!WARN_ON(!direct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		/* This is the good path (see the ! before WARN) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 		direct->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 		WARN_ON(direct->count < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 		if (!direct->count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 			list_del_rcu(&direct->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 			synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 			kfree(direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 			kfree(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 			ftrace_direct_func_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 	mutex_unlock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) static struct ftrace_ops stub_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 	.func		= ftrace_stub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264)  * ftrace_modify_direct_caller - modify ftrace nop directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265)  * @entry: The ftrace hash entry of the direct helper for @rec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)  * @rec: The record representing the function site to patch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267)  * @old_addr: The location that the site at @rec->ip currently calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268)  * @new_addr: The location that the site at @rec->ip should call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270)  * An architecture may overwrite this function to optimize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271)  * changing of the direct callback on an ftrace nop location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272)  * This is called with the ftrace_lock mutex held, and no other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273)  * ftrace callbacks are on the associated record (@rec). Thus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274)  * it is safe to modify the ftrace record, where it should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275)  * currently calling @old_addr directly, to call @new_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277)  * Safety checks should be made to make sure that the code at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)  * @rec->ip is currently calling @old_addr. And this must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279)  * also update entry->direct to @new_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) 				       struct dyn_ftrace *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) 				       unsigned long old_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) 				       unsigned long new_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 	unsigned long ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 	 * The ftrace_lock was used to determine if the record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 	 * had more than one registered user to it. If it did,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 	 * we needed to prevent that from changing to do the quick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 	 * switch. But if it did not (only a direct caller was attached)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 	 * then this function is called. But this function can deal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	 * with attached callers to the rec that we care about, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 	 * since this function uses standard ftrace calls that take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 	 * the ftrace_lock mutex, we need to release it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 	 * By setting a stub function at the same address, we force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 	 * the code to call the iterator and the direct_ops helper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 	 * This means that @ip does not call the direct call, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 	 * we can simply modify it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 		goto out_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 	ret = register_ftrace_function(&stub_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) 		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 		goto out_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 	entry->direct = new_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 	 * By removing the stub, we put back the direct call, calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 	 * the @new_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 	unregister_ftrace_function(&stub_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326)  out_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333)  * modify_ftrace_direct - Modify an existing direct call to call something else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)  * @ip: The instruction pointer to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335)  * @old_addr: The address that the current @ip calls directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336)  * @new_addr: The address that the @ip should call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338)  * This modifies a ftrace direct caller at an instruction pointer without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339)  * having to disable it first. The direct call will switch over to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340)  * @new_addr without missing anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342)  * Returns: zero on success. Non zero on error, which includes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343)  *  -ENODEV : the @ip given has no direct caller attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)  *  -EINVAL : the @old_addr does not match the current direct caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) int modify_ftrace_direct(unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 			 unsigned long old_addr, unsigned long new_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 	struct ftrace_direct_func *direct, *new_direct = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 	mutex_lock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 	entry = find_direct_entry(&ip, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 	ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 	if (entry->direct != old_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 	direct = ftrace_find_direct_func(old_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 	if (WARN_ON(!direct))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 	if (direct->count > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 		new_direct = ftrace_alloc_direct_func(new_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 		if (!new_direct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 		direct->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 		new_direct->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 		direct->addr = new_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 	 * If there's no other ftrace callback on the rec->ip location,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 	 * then it can be changed directly by the architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 	 * If there is another caller, then we just need to change the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 	 * direct caller helper to point to @new_addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 	if (ftrace_rec_count(rec) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 		entry->direct = new_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 	if (unlikely(ret && new_direct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 		direct->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 		list_del_rcu(&new_direct->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 		synchronize_rcu_tasks();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 		kfree(new_direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 		ftrace_direct_func_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 	mutex_unlock(&direct_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) EXPORT_SYMBOL_GPL(modify_ftrace_direct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409)  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)  * @ops - the ops to set the filter with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411)  * @ip - the address to add to or remove from the filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)  * @remove - non zero to remove the ip from the filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413)  * @reset - non zero to reset all filters before applying this filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)  * Filters denote which functions should be enabled when tracing is enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416)  * If @ip is NULL, it failes to update filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 			 int remove, int reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 	return ftrace_set_addr(ops, ip, remove, reset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)  * ftrace_ops_set_global_filter - setup ops to use global filters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428)  * @ops - the ops which will use the global filters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)  * ftrace users who need global function trace filtering should call this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)  * It can set the global filter only if ops were not initialized before.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 	ops->func_hash = &global_ops.local_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) 		 int reset, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451)  * ftrace_set_filter - set a function to filter on in ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452)  * @ops - the ops to set the filter with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453)  * @buf - the string that holds the function filter text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454)  * @len - the length of the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455)  * @reset - non zero to reset all filters before applying this filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457)  * Filters denote which functions should be enabled when tracing is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458)  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 		       int len, int reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 	return ftrace_set_regex(ops, buf, len, reset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) EXPORT_SYMBOL_GPL(ftrace_set_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469)  * ftrace_set_notrace - set a function to not trace in ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470)  * @ops - the ops to set the notrace filter with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471)  * @buf - the string that holds the function notrace text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472)  * @len - the length of the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473)  * @reset - non zero to reset all filters before applying this filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475)  * Notrace Filters denote which functions should not be enabled when tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476)  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477)  * for tracing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) 			int len, int reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) 	return ftrace_set_regex(ops, buf, len, reset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) EXPORT_SYMBOL_GPL(ftrace_set_notrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)  * ftrace_set_global_filter - set a function to filter on with global tracers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488)  * @buf - the string that holds the function filter text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489)  * @len - the length of the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490)  * @reset - non zero to reset all filters before applying this filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492)  * Filters denote which functions should be enabled when tracing is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493)  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502)  * ftrace_set_global_notrace - set a function to not trace with global tracers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503)  * @buf - the string that holds the function notrace text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504)  * @len - the length of the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505)  * @reset - non zero to reset all filters before applying this filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)  * Notrace Filters denote which functions should not be enabled when tracing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508)  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509)  * for tracing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518)  * command line interface to allow users to set filters on boot up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) /* Used by function selftest to not test if filter is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) bool ftrace_filter_param __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) static int __init set_ftrace_notrace(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 	ftrace_filter_param = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) __setup("ftrace_notrace=", set_ftrace_notrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) static int __init set_ftrace_filter(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 	ftrace_filter_param = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) __setup("ftrace_filter=", set_ftrace_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) static int __init set_graph_function(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) __setup("ftrace_graph_filter=", set_graph_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) static int __init set_graph_notrace_function(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) __setup("ftrace_graph_notrace=", set_graph_notrace_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) static int __init set_graph_max_depth_function(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) 	if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) 	fgraph_max_depth = simple_strtoul(str, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) static void __init set_ftrace_early_graph(char *buf, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) 	char *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) 	struct ftrace_hash *hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) 	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) 	while (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) 		func = strsep(&buf, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) 		/* we allow only one expression at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) 		ret = ftrace_graph_set_hash(hash, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) 			printk(KERN_DEBUG "ftrace: function %s not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) 					  "traceable\n", func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) 		ftrace_graph_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) 		ftrace_graph_notrace_hash = hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) 	char *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) 	while (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) 		func = strsep(&buf, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) static void __init set_ftrace_early_filters(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) 	if (ftrace_filter_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) 	if (ftrace_notrace_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) 	if (ftrace_graph_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) 		set_ftrace_early_graph(ftrace_graph_buf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) 	if (ftrace_graph_notrace_buf[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) int ftrace_regex_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) 	struct seq_file *m = (struct seq_file *)file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) 	struct ftrace_iterator *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) 	struct ftrace_hash **orig_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) 	struct trace_parser *parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 	int filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) 		iter = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) 		seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) 		iter = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) 	parser = &iter->parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) 	if (trace_parser_loaded(parser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) 		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) 		ftrace_process_regex(iter, parser->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) 				     parser->idx, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) 	trace_parser_put(parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) 	mutex_lock(&iter->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) 	if (file->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) 		if (filter_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) 			orig_hash = &iter->ops->func_hash->filter_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) 			if (iter->tr && !list_empty(&iter->tr->mod_trace))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) 				iter->hash->flags |= FTRACE_HASH_FL_MOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) 			orig_hash = &iter->ops->func_hash->notrace_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) 		mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) 		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) 						      iter->hash, filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) 		mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) 		/* For read only, the hash is the ops hash */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) 		iter->hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) 	mutex_unlock(&iter->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) 	free_ftrace_hash(iter->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) 	if (iter->tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) 		trace_array_put(iter->tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) 	kfree(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) static const struct file_operations ftrace_avail_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) 	.open = ftrace_avail_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 	.read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) 	.llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) 	.release = seq_release_private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) static const struct file_operations ftrace_enabled_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) 	.open = ftrace_enabled_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) 	.read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) 	.llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) 	.release = seq_release_private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) static const struct file_operations ftrace_filter_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) 	.open = ftrace_filter_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) 	.read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) 	.write = ftrace_filter_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) 	.llseek = tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) 	.release = ftrace_regex_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) static const struct file_operations ftrace_notrace_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) 	.open = ftrace_notrace_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) 	.read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) 	.write = ftrace_notrace_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) 	.llseek = tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) 	.release = ftrace_regex_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) static DEFINE_MUTEX(graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) enum graph_filter_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) 	GRAPH_FILTER_NOTRACE	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) 	GRAPH_FILTER_FUNCTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) #define FTRACE_GRAPH_EMPTY	((void *)1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) struct ftrace_graph_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) 	struct ftrace_hash		*hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) 	struct ftrace_func_entry	*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) 	int				idx;   /* for hash table iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) 	enum graph_filter_type		type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) 	struct ftrace_hash		*new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) 	const struct seq_operations	*seq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) 	struct trace_parser		parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) __g_next(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 	struct ftrace_graph_data *fgd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) 	struct ftrace_func_entry *entry = fgd->entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) 	struct hlist_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) 	int i, idx = fgd->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) 	if (*pos >= fgd->hash->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) 	if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) 		hlist_for_each_entry_continue(entry, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) 			fgd->entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 		idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) 	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) 		head = &fgd->hash->buckets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) 		hlist_for_each_entry(entry, head, hlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) 			fgd->entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) 			fgd->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) g_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) 	(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) 	return __g_next(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) static void *g_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) 	struct ftrace_graph_data *fgd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) 	mutex_lock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) 	if (fgd->type == GRAPH_FILTER_FUNCTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) 		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) 		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) 	/* Nothing, tell g_show to print all functions are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) 	if (ftrace_hash_empty(fgd->hash) && !*pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) 		return FTRACE_GRAPH_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) 	fgd->idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) 	fgd->entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) 	return __g_next(m, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) static void g_stop(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) 	mutex_unlock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) static int g_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) 	struct ftrace_func_entry *entry = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) 	if (entry == FTRACE_GRAPH_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) 		struct ftrace_graph_data *fgd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) 		if (fgd->type == GRAPH_FILTER_FUNCTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) 			seq_puts(m, "#### all functions enabled ####\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) 			seq_puts(m, "#### no functions disabled ####\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) 	seq_printf(m, "%ps\n", (void *)entry->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) static const struct seq_operations ftrace_graph_seq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) 	.start = g_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) 	.next = g_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) 	.stop = g_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) 	.show = g_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) __ftrace_graph_open(struct inode *inode, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) 		    struct ftrace_graph_data *fgd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) 	struct ftrace_hash *new_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) 	ret = security_locked_down(LOCKDOWN_TRACEFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) 	if (file->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) 		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) 		if (file->f_flags & O_TRUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) 			new_hash = alloc_ftrace_hash(size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) 			new_hash = alloc_and_copy_ftrace_hash(size_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) 							      fgd->hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) 		if (!new_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) 		ret = seq_open(file, &ftrace_graph_seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) 		if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) 			struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) 			m->private = fgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) 			/* Failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) 			free_ftrace_hash(new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) 			new_hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) 		file->private_data = fgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) 	if (ret < 0 && file->f_mode & FMODE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) 		trace_parser_put(&fgd->parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) 	fgd->new_hash = new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) 	 * All uses of fgd->hash must be taken with the graph_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) 	 * held. The graph_lock is going to be released, so force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) 	 * fgd->hash to be reinitialized when it is taken again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) 	fgd->hash = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) ftrace_graph_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) 	struct ftrace_graph_data *fgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) 	if (fgd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) 	mutex_lock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) 	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) 	fgd->type = GRAPH_FILTER_FUNCTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) 	fgd->seq_ops = &ftrace_graph_seq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) 	ret = __ftrace_graph_open(inode, file, fgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) 		kfree(fgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) 	mutex_unlock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) ftrace_graph_notrace_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) 	struct ftrace_graph_data *fgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) 	if (fgd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) 	mutex_lock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) 	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) 	fgd->type = GRAPH_FILTER_NOTRACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) 	fgd->seq_ops = &ftrace_graph_seq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) 	ret = __ftrace_graph_open(inode, file, fgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) 		kfree(fgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) 	mutex_unlock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) ftrace_graph_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) 	struct ftrace_graph_data *fgd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) 	struct ftrace_hash *old_hash, *new_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) 	struct trace_parser *parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) 		struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) 		fgd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) 		seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) 		fgd = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) 	if (file->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) 		parser = &fgd->parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) 		if (trace_parser_loaded((parser))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) 			ret = ftrace_graph_set_hash(fgd->new_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) 						    parser->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) 		trace_parser_put(parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) 		new_hash = __ftrace_hash_move(fgd->new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) 		if (!new_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) 		mutex_lock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) 		if (fgd->type == GRAPH_FILTER_FUNCTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) 			old_hash = rcu_dereference_protected(ftrace_graph_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) 			rcu_assign_pointer(ftrace_graph_hash, new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) 			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) 					lockdep_is_held(&graph_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) 			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) 		mutex_unlock(&graph_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) 		 * We need to do a hard force of sched synchronization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) 		 * This is because we use preempt_disable() to do RCU, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) 		 * the function tracers can be called where RCU is not watching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) 		 * (like before user_exit()). We can not rely on the RCU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) 		 * infrastructure to do the synchronization, thus we must do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) 		 * ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) 		synchronize_rcu_tasks_rude();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) 		free_ftrace_hash(old_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) 	free_ftrace_hash(fgd->new_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) 	kfree(fgd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) 	struct ftrace_glob func_g;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) 	int fail = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) 	int not;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) 	/* decode regex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) 					 &func_g.search, &not);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) 	func_g.len = strlen(func_g.search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) 	if (unlikely(ftrace_disabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) 		mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) 		if (rec->flags & FTRACE_FL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) 			entry = ftrace_lookup_ip(hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) 			if (!not) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) 				fail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) 				if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) 				if (add_hash_entry(hash, rec->ip) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) 				if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) 					free_hash_entry(hash, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) 					fail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) 	if (fail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) ftrace_graph_write(struct file *file, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) 		   size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) 	ssize_t read, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) 	struct ftrace_graph_data *fgd = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) 	struct trace_parser *parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) 	if (!cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) 	/* Read mode uses seq functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) 	if (file->f_mode & FMODE_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) 		struct seq_file *m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) 		fgd = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) 	parser = &fgd->parser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) 	read = trace_get_user(parser, ubuf, cnt, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) 	if (read >= 0 && trace_parser_loaded(parser) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) 	    !trace_parser_cont(parser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) 		ret = ftrace_graph_set_hash(fgd->new_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) 					    parser->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) 		trace_parser_clear(parser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) 		ret = read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) static const struct file_operations ftrace_graph_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) 	.open		= ftrace_graph_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) 	.write		= ftrace_graph_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) 	.llseek		= tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) 	.release	= ftrace_graph_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) static const struct file_operations ftrace_graph_notrace_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) 	.open		= ftrace_graph_notrace_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) 	.write		= ftrace_graph_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) 	.llseek		= tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) 	.release	= ftrace_graph_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) void ftrace_create_filter_files(struct ftrace_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) 				struct dentry *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) 	trace_create_file("set_ftrace_filter", 0644, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) 			  ops, &ftrace_filter_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) 	trace_create_file("set_ftrace_notrace", 0644, parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) 			  ops, &ftrace_notrace_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128)  * The name "destroy_filter_files" is really a misnomer. Although
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129)  * in the future, it may actually delete the files, but this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130)  * really intended to make sure the ops passed in are disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131)  * and that when this function returns, the caller is free to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132)  * free the ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134)  * The "destroy" name is only to match the "create" name that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135)  * should be paired with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) void ftrace_destroy_filter_files(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) 		ftrace_shutdown(ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) 	ops->flags |= FTRACE_OPS_FL_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) 	ftrace_free_filter(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) 	trace_create_file("available_filter_functions", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) 			d_tracer, NULL, &ftrace_avail_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) 	trace_create_file("enabled_functions", 0444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) 			d_tracer, NULL, &ftrace_enabled_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) 	ftrace_create_filter_files(&global_ops, d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) 	trace_create_file("set_graph_function", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) 				    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) 				    &ftrace_graph_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) 	trace_create_file("set_graph_notrace", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) 				    NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) 				    &ftrace_graph_notrace_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) static int ftrace_cmp_ips(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) 	const unsigned long *ipa = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) 	const unsigned long *ipb = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) 	if (*ipa > *ipb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) 	if (*ipa < *ipb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) static int ftrace_process_locs(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) 			       unsigned long *start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) 			       unsigned long *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) 	struct ftrace_page *start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) 	unsigned long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) 	unsigned long *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) 	unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) 	unsigned long flags = 0; /* Shut up gcc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) 	count = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) 	if (!count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) 	sort(start, count, sizeof(*start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) 	     ftrace_cmp_ips, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) 	start_pg = ftrace_allocate_pages(count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) 	if (!start_pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) 	 * Core and each module needs their own pages, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) 	 * modules will free them when they are removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) 	 * Force a new page to be allocated for modules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) 	if (!mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) 		WARN_ON(ftrace_pages || ftrace_pages_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) 		/* First initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) 		ftrace_pages = ftrace_pages_start = start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) 		if (!ftrace_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) 		if (WARN_ON(ftrace_pages->next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) 			/* Hmm, we have free pages? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) 			while (ftrace_pages->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) 				ftrace_pages = ftrace_pages->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) 		ftrace_pages->next = start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) 	p = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) 	pg = start_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) 	while (p < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) 		addr = ftrace_call_adjust(*p++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) 		 * Some architecture linkers will pad between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) 		 * the different mcount_loc sections of different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) 		 * object files to satisfy alignments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) 		 * Skip any NULL pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) 		if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) 		if (pg->index == pg->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) 			/* We should have allocated enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) 			if (WARN_ON(!pg->next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) 			pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) 		rec = &pg->records[pg->index++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) 		rec->ip = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) 	/* We should have used all pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) 	WARN_ON(pg->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) 	/* Assign the last page to ftrace_pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) 	ftrace_pages = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) 	 * We only need to disable interrupts on start up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) 	 * because we are modifying code that an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) 	 * may execute, and the modification is not atomic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) 	 * But for modules, nothing runs the code we modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) 	 * until we are finished with it, and there's no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) 	 * reason to cause large interrupt latencies while we do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) 	if (!mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) 	ftrace_update_code(mod, start_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) 	if (!mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) struct ftrace_mod_func {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) 	char			*name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) 	unsigned long		ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) 	unsigned int		size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) struct ftrace_mod_map {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) 	struct rcu_head		rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) 	struct module		*mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) 	unsigned long		start_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) 	unsigned long		end_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) 	struct list_head	funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) 	unsigned int		num_funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) static int ftrace_get_trampoline_kallsym(unsigned int symnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) 					 unsigned long *value, char *type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) 					 char *name, char *module_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) 					 int *exported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) 	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) 		if (!op->trampoline || symnum--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) 		*value = op->trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) 		*type = 't';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) 		strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) 		strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) 		*exported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) 	return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) static LIST_HEAD(ftrace_mod_maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) static int referenced_filters(struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) 	struct ftrace_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) 	int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) 		if (ops_references_rec(ops, rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) 			cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) 				rec->flags |= FTRACE_FL_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) 			if (cnt == 1 && ops->trampoline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) 				rec->flags |= FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) 				rec->flags &= ~FTRACE_FL_TRAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) 	return cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) 	if (ftrace_hash_empty(hash))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) 	for (i = 0; i < pg->index; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) 		rec = &pg->records[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) 		entry = __ftrace_lookup_ip(hash, rec->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) 		 * Do not allow this rec to match again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) 		 * Yeah, it may waste some memory, but will be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) 		 * if/when the hash is modified again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) 		if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) 			entry->ip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) /* Clear any records from hashs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) static void clear_mod_from_hashes(struct ftrace_page *pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) 	struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) 	mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) 		if (!tr->ops || !tr->ops->func_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) 		mutex_lock(&tr->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) 		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) 		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) 		mutex_unlock(&tr->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) 	mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) static void ftrace_free_mod_map(struct rcu_head *rcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) 	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) 	struct ftrace_mod_func *mod_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) 	struct ftrace_mod_func *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) 	/* All the contents of mod_map are now not visible to readers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) 	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) 		kfree(mod_func->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) 		list_del(&mod_func->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) 		kfree(mod_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) 	kfree(mod_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) void ftrace_release_mod(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) 	struct ftrace_mod_map *mod_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) 	struct ftrace_mod_map *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) 	struct ftrace_page **last_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) 	struct ftrace_page *tmp_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) 	if (ftrace_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) 		if (mod_map->mod == mod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) 			list_del_rcu(&mod_map->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) 			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) 	 * Each module has its own ftrace_pages, remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) 	 * them from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) 	last_pg = &ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) 		rec = &pg->records[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) 		if (within_module_core(rec->ip, mod) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) 		    within_module_init(rec->ip, mod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) 			 * As core pages are first, the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) 			 * page should never be a module page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) 			if (WARN_ON(pg == ftrace_pages_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) 				goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) 			/* Check if we are deleting the last page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) 			if (pg == ftrace_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) 				ftrace_pages = next_to_ftrace_page(last_pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) 			ftrace_update_tot_cnt -= pg->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) 			*last_pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) 			pg->next = tmp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) 			tmp_page = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) 			last_pg = &pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) 	for (pg = tmp_page; pg; pg = tmp_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) 		/* Needs to be called outside of ftrace_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) 		clear_mod_from_hashes(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) 		if (order >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) 			free_pages((unsigned long)pg->records, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) 		tmp_page = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) 		kfree(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) 		ftrace_number_of_pages -= 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) 		ftrace_number_of_groups--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) void ftrace_module_enable(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) 	if (ftrace_disabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) 	 * If the tracing is enabled, go ahead and enable the record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) 	 * The reason not to enable the record immediately is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) 	 * inherent check of ftrace_make_nop/ftrace_make_call for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) 	 * correct previous instructions.  Making first the NOP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) 	 * conversion puts the module to the correct state, thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) 	 * passing the ftrace_make_call check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) 	 * We also delay this to after the module code already set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) 	 * text to read-only, as we now need to set it back to read-write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) 	 * so that we can modify the text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) 	if (ftrace_start_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) 		ftrace_arch_code_modify_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) 	do_for_each_ftrace_rec(pg, rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) 		int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) 		 * do_for_each_ftrace_rec() is a double loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) 		 * module text shares the pg. If a record is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) 		 * not part of this module, then skip this pg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) 		 * which the "break" will do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) 		if (!within_module_core(rec->ip, mod) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) 		    !within_module_init(rec->ip, mod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) 		cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) 		 * When adding a module, we need to check if tracers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) 		 * currently enabled and if they are, and can trace this record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) 		 * we need to enable the module functions as well as update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) 		 * reference counts for those function records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) 		if (ftrace_start_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) 			cnt += referenced_filters(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) 		rec->flags &= ~FTRACE_FL_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) 		rec->flags += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) 		if (ftrace_start_up && cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) 			int failed = __ftrace_replace_code(rec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) 			if (failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) 				ftrace_bug(failed, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) 				goto out_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) 	} while_for_each_ftrace_rec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536)  out_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) 	if (ftrace_start_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) 		ftrace_arch_code_modify_post_process();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540)  out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) 	process_cached_mods(mod->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) void ftrace_module_init(struct module *mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) 	if (ftrace_disabled || !mod->num_ftrace_callsites)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) 	ftrace_process_locs(mod, mod->ftrace_callsites,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) 			    mod->ftrace_callsites + mod->num_ftrace_callsites);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) 				struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) 	struct ftrace_mod_func *mod_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) 	unsigned long symsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) 	unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) 	char str[KSYM_SYMBOL_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) 	char *modname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) 	const char *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) 	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) 	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) 	if (!mod_func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) 	mod_func->name = kstrdup(str, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) 	if (!mod_func->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) 		kfree(mod_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) 	mod_func->ip = rec->ip - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) 	mod_func->size = symsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) 	mod_map->num_funcs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) 	list_add_rcu(&mod_func->list, &mod_map->funcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) static struct ftrace_mod_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) allocate_ftrace_mod_map(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) 			unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) 	struct ftrace_mod_map *mod_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) 	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) 	if (!mod_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) 	mod_map->mod = mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) 	mod_map->start_addr = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) 	mod_map->end_addr = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) 	mod_map->num_funcs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) 	INIT_LIST_HEAD_RCU(&mod_map->funcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) 	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) 	return mod_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) static const char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) 			   unsigned long addr, unsigned long *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) 			   unsigned long *off, char *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) 	struct ftrace_mod_func *found_func =  NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) 	struct ftrace_mod_func *mod_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) 	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) 		if (addr >= mod_func->ip &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) 		    addr < mod_func->ip + mod_func->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) 			found_func = mod_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) 	if (found_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) 		if (size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) 			*size = found_func->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) 		if (off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) 			*off = addr - found_func->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) 		if (sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) 			strlcpy(sym, found_func->name, KSYM_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) 		return found_func->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) const char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) 		   unsigned long *off, char **modname, char *sym)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) 	struct ftrace_mod_map *mod_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) 	const char *ret = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) 	/* mod_map is freed via call_rcu() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) 			if (modname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) 				*modname = mod_map->mod->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) 			   char *type, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) 			   char *module_name, int *exported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) 	struct ftrace_mod_map *mod_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) 	struct ftrace_mod_func *mod_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) 		if (symnum >= mod_map->num_funcs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) 			symnum -= mod_map->num_funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) 		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) 			if (symnum > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) 				symnum--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) 			*value = mod_func->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) 			*type = 'T';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) 			strlcpy(name, mod_func->name, KSYM_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) 			strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) 			*exported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) 			preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) 					    module_name, exported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) 				struct dyn_ftrace *rec) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) static inline struct ftrace_mod_map *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) allocate_ftrace_mod_map(struct module *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) 			unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) 			   char *type, char *name, char *module_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) 			   int *exported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) 					    module_name, exported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) #endif /* CONFIG_MODULES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) struct ftrace_init_func {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) 	struct list_head list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) 	unsigned long ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) /* Clear any init ips from hashes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) 	struct ftrace_func_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) 	entry = ftrace_lookup_ip(hash, func->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) 	 * Do not allow this rec to match again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) 	 * Yeah, it may waste some memory, but will be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) 	 * if/when the hash is modified again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) 		entry->ip = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) clear_func_from_hashes(struct ftrace_init_func *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) 	struct trace_array *tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) 	mutex_lock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) 		if (!tr->ops || !tr->ops->func_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) 		mutex_lock(&tr->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) 		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) 		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) 		mutex_unlock(&tr->ops->func_hash->regex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) 	mutex_unlock(&trace_types_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) static void add_to_clear_hash_list(struct list_head *clear_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) 				   struct dyn_ftrace *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) 	struct ftrace_init_func *func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) 	func = kmalloc(sizeof(*func), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) 	if (!func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) 		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) 	func->ip = rec->ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) 	list_add(&func->list, clear_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) 	unsigned long start = (unsigned long)(start_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) 	unsigned long end = (unsigned long)(end_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) 	struct ftrace_page **last_pg = &ftrace_pages_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) 	struct ftrace_page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) 	struct dyn_ftrace *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) 	struct dyn_ftrace key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) 	struct ftrace_mod_map *mod_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) 	struct ftrace_init_func *func, *func_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) 	struct list_head clear_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) 	INIT_LIST_HEAD(&clear_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) 	key.ip = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) 	key.flags = end;	/* overload flags, as it is unsigned long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) 	 * If we are freeing module init memory, then check if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) 	 * any tracer is active. If so, we need to save a mapping of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) 	 * the module functions being freed with the address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) 	if (mod && ftrace_ops_list != &ftrace_list_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) 		mod_map = allocate_ftrace_mod_map(mod, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) 		if (end < pg->records[0].ip ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808)  again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) 		rec = bsearch(&key, pg->records, pg->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) 			      sizeof(struct dyn_ftrace),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) 			      ftrace_cmp_recs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) 		if (!rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) 		/* rec will be cleared from hashes after ftrace_lock unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) 		add_to_clear_hash_list(&clear_hash, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) 		if (mod_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) 			save_ftrace_mod_rec(mod_map, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) 		pg->index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) 		ftrace_update_tot_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) 		if (!pg->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) 			*last_pg = pg->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) 			if (order >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) 				free_pages((unsigned long)pg->records, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) 			ftrace_number_of_pages -= 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) 			ftrace_number_of_groups--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) 			kfree(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) 			pg = container_of(last_pg, struct ftrace_page, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) 			if (!(*last_pg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) 				ftrace_pages = pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) 		memmove(rec, rec + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) 			(pg->index - (rec - pg->records)) * sizeof(*rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) 		/* More than one function may be in this block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) 	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) 		clear_func_from_hashes(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) 		kfree(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) void __init ftrace_free_init_mem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) 	void *start = (void *)(&__init_begin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) 	void *end = (void *)(&__init_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) 	ftrace_free_mem(NULL, start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) void __init ftrace_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) 	extern unsigned long __start_mcount_loc[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) 	extern unsigned long __stop_mcount_loc[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) 	unsigned long count, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) 	ret = ftrace_dyn_arch_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) 	count = __stop_mcount_loc - __start_mcount_loc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) 	if (!count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) 		pr_info("ftrace: No functions to be traced?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) 		count, count / ENTRIES_PER_PAGE + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) 	last_ftrace_enabled = ftrace_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) 	ret = ftrace_process_locs(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) 				  __start_mcount_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) 				  __stop_mcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) 	pr_info("ftrace: allocated %ld pages with %ld groups\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) 		ftrace_number_of_pages, ftrace_number_of_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) 	set_ftrace_early_filters();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891)  failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) 	ftrace_disabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) /* Do nothing if arch does not support this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) static void ftrace_update_trampoline(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) 	unsigned long trampoline = ops->trampoline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) 	arch_ftrace_update_trampoline(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) 	if (ops->trampoline && ops->trampoline != trampoline &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) 	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) 		/* Add to kallsyms before the perf events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) 		ftrace_add_trampoline_to_kallsyms(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) 				   ops->trampoline, ops->trampoline_size, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) 				   FTRACE_TRAMPOLINE_SYM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) 		 * Record the perf text poke event after the ksymbol register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) 		 * event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) 		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) 				     (void *)ops->trampoline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) 				     ops->trampoline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) void ftrace_init_trace_array(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) 	INIT_LIST_HEAD(&tr->func_probes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) 	INIT_LIST_HEAD(&tr->mod_trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) 	INIT_LIST_HEAD(&tr->mod_notrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) struct ftrace_ops global_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) 	.func			= ftrace_stub,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) 				  FTRACE_OPS_FL_INITIALIZED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) 				  FTRACE_OPS_FL_PID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) static int __init ftrace_nodyn_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) 	ftrace_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) core_initcall(ftrace_nodyn_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) static inline void ftrace_startup_enable(int command) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) static inline void ftrace_startup_all(int command) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) # define ftrace_startup_sysctl()	do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) # define ftrace_shutdown_sysctl()	do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) static void ftrace_update_trampoline(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) #endif /* CONFIG_DYNAMIC_FTRACE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) __init void ftrace_init_global_array_ops(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) 	tr->ops = &global_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) 	tr->ops->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) 	ftrace_init_trace_array(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) 	/* If we filter on pids, update to use the pid function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) 		if (WARN_ON(tr->ops->func != ftrace_stub))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) 			printk("ftrace ops had %pS for function\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) 			       tr->ops->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) 	tr->ops->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) 	tr->ops->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) void ftrace_reset_array_ops(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) 	tr->ops->func = ftrace_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) static nokprobe_inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) 		       struct ftrace_ops *ignored, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) 	int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) 	bit = trace_test_and_set_recursion(TRACE_LIST_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) 	if (bit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) 	 * Some of the ops may be dynamically allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) 	 * they must be freed after a synchronize_rcu().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) 	preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) 		/* Stub functions don't need to be called nor tested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) 		if (op->flags & FTRACE_OPS_FL_STUB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) 		 * Check the following for each ops before calling their func:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) 		 *  if RCU flag is set, then rcu_is_watching() must be true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) 		 *  if PER_CPU is set, then ftrace_function_local_disable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) 		 *                          must be false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) 		 *  Otherwise test if the ip matches the ops filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) 		 * If any of the above fails then the op->func() is not executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) 		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) 		    ftrace_ops_test(op, ip, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) 			if (FTRACE_WARN_ON(!op->func)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) 				pr_warn("op=%p %pS\n", op, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) 			op->func(ip, parent_ip, op, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) 	preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) 	trace_clear_recursion(bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026)  * Some archs only support passing ip and parent_ip. Even though
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027)  * the list function ignores the op parameter, we do not want any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028)  * C side effects, where a function is called without the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029)  * sending a third parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030)  * Archs are to support both the regs and ftrace_ops at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031)  * If they support ftrace_ops, it is assumed they support regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032)  * If call backs want to use regs, they must either check for regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033)  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034)  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035)  * An architecture can pass partial regs with ftrace_ops and still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036)  * set the ARCH_SUPPORTS_FTRACE_OPS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) #if ARCH_SUPPORTS_FTRACE_OPS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) 				 struct ftrace_ops *op, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) NOKPROBE_SYMBOL(ftrace_ops_list_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) NOKPROBE_SYMBOL(ftrace_ops_no_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054)  * If there's only one function registered but it does not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)  * recursion, needs RCU protection and/or requires per cpu handling, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056)  * this function will be called by the mcount trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) 				   struct ftrace_ops *op, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) 	int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) 	bit = trace_test_and_set_recursion(TRACE_LIST_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) 	if (bit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) 	preempt_disable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) 	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) 		op->func(ip, parent_ip, op, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) 	preempt_enable_notrace();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) 	trace_clear_recursion(bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) NOKPROBE_SYMBOL(ftrace_ops_assist_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078)  * ftrace_ops_get_func - get the function a trampoline should call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079)  * @ops: the ops to get the function for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081)  * Normally the mcount trampoline will call the ops->func, but there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082)  * are times that it should not. For example, if the ops does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083)  * have its own recursion protection, then it should call the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084)  * ftrace_ops_assist_func() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086)  * Returns the function that the trampoline should call for @ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) 	 * If the function does not handle recursion, needs to be RCU safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) 	 * or does per cpu logic, then we need to call the assist handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) 	    ops->flags & FTRACE_OPS_FL_RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) 		return ftrace_ops_assist_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) 	return ops->func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) 		    struct task_struct *prev, struct task_struct *next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) 	struct trace_array *tr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) 	struct trace_pid_list *no_pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) 	pid_list = rcu_dereference_sched(tr->function_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) 	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) 	if (trace_ignore_this_task(pid_list, no_pid_list, next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) 			       FTRACE_PID_IGNORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) 			       next->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) ftrace_pid_follow_sched_process_fork(void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) 				     struct task_struct *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) 				     struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) 	struct trace_array *tr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) 	pid_list = rcu_dereference_sched(tr->function_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) 	trace_filter_add_remove_task(pid_list, self, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) 	pid_list = rcu_dereference_sched(tr->function_no_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) 	trace_filter_add_remove_task(pid_list, self, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) 	struct trace_array *tr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) 	pid_list = rcu_dereference_sched(tr->function_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) 	trace_filter_add_remove_task(pid_list, NULL, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) 	pid_list = rcu_dereference_sched(tr->function_no_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) 	trace_filter_add_remove_task(pid_list, NULL, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) 	if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) 		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) 						  tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) 		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) 						  tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) 		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) 						    tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) 		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) 						    tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) static void clear_ftrace_pids(struct trace_array *tr, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) 	struct trace_pid_list *no_pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) 	pid_list = rcu_dereference_protected(tr->function_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) 					     lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) 						lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) 	/* Make sure there's something to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) 	if (!pid_type_enabled(type, pid_list, no_pid_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) 	/* See if the pids still need to be checked after this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) 	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) 		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) 		for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) 			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) 	if (type & TRACE_PIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) 		rcu_assign_pointer(tr->function_pids, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) 	if (type & TRACE_NO_PIDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) 		rcu_assign_pointer(tr->function_no_pids, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) 	/* Wait till all users are no longer using pid filtering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) 	if ((type & TRACE_PIDS) && pid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) 		trace_free_pid_list(pid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) 	if ((type & TRACE_NO_PIDS) && no_pid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) 		trace_free_pid_list(no_pid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) void ftrace_clear_pids(struct trace_array *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) 	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) static void ftrace_pid_reset(struct trace_array *tr, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) 	clear_ftrace_pids(tr, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) 	ftrace_update_pid_func();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) 	ftrace_startup_all(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) /* Greater than any max PID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) #define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) static void *fpid_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) 	__acquires(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) 	struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) 	rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) 	pid_list = rcu_dereference_sched(tr->function_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) 	if (!pid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) 	return trace_pid_start(pid_list, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) 	struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) 	if (v == FTRACE_NO_PIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) 		(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) 	return trace_pid_next(pid_list, v, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) static void fpid_stop(struct seq_file *m, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) 	__releases(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) 	rcu_read_unlock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) static int fpid_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) 	if (v == FTRACE_NO_PIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) 		seq_puts(m, "no pid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) 	return trace_pid_show(m, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) static const struct seq_operations ftrace_pid_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) 	.start = fpid_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) 	.next = fpid_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) 	.stop = fpid_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) 	.show = fpid_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) static void *fnpid_start(struct seq_file *m, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) 	__acquires(RCU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) 	struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) 	rcu_read_lock_sched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) 	pid_list = rcu_dereference_sched(tr->function_no_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) 	if (!pid_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) 	return trace_pid_start(pid_list, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) 	struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) 	if (v == FTRACE_NO_PIDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) 		(*pos)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) 	return trace_pid_next(pid_list, v, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) static const struct seq_operations ftrace_no_pid_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) 	.start = fnpid_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) 	.next = fnpid_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) 	.stop = fpid_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) 	.show = fpid_show,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) static int pid_open(struct inode *inode, struct file *file, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) 	const struct seq_operations *seq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) 	struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) 	struct seq_file *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) 	ret = tracing_check_open_get_tr(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) 	if ((file->f_mode & FMODE_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) 	    (file->f_flags & O_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) 		ftrace_pid_reset(tr, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) 	case TRACE_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) 		seq_ops = &ftrace_pid_sops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) 	case TRACE_NO_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) 		seq_ops = &ftrace_no_pid_sops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) 		trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) 	ret = seq_open(file, seq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) 		trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) 		m = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) 		/* copy tr over to seq ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) 		m->private = tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) ftrace_pid_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) 	return pid_open(inode, file, TRACE_PIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) ftrace_no_pid_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) 	return pid_open(inode, file, TRACE_NO_PIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) static void ignore_task_cpu(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) 	struct trace_array *tr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) 	struct trace_pid_list *no_pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) 	 * This function is called by on_each_cpu() while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) 	 * event_mutex is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) 	pid_list = rcu_dereference_protected(tr->function_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) 					     mutex_is_locked(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) 						mutex_is_locked(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) 	if (trace_ignore_this_task(pid_list, no_pid_list, current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) 			       FTRACE_PID_IGNORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) 			       current->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) pid_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) 	  size_t cnt, loff_t *ppos, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) 	struct seq_file *m = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) 	struct trace_array *tr = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) 	struct trace_pid_list *filtered_pids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) 	struct trace_pid_list *other_pids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) 	struct trace_pid_list *pid_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) 	if (!cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) 	case TRACE_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) 		filtered_pids = rcu_dereference_protected(tr->function_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) 					     lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) 		other_pids = rcu_dereference_protected(tr->function_no_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) 					     lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) 	case TRACE_NO_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) 		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) 					     lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) 		other_pids = rcu_dereference_protected(tr->function_pids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) 					     lockdep_is_held(&ftrace_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) 	case TRACE_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) 		rcu_assign_pointer(tr->function_pids, pid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) 	case TRACE_NO_PIDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) 		rcu_assign_pointer(tr->function_no_pids, pid_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) 	if (filtered_pids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) 		synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) 		trace_free_pid_list(filtered_pids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) 	} else if (pid_list && !other_pids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) 		/* Register a probe to set whether to ignore the tracing of a task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) 		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) 	 * Ignoring of pids is done at task switch. But we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) 	 * check for those tasks that are currently running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) 	 * Always do this in case a pid was appended or removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) 	on_each_cpu(ignore_task_cpu, tr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) 	ftrace_update_pid_func();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) 	ftrace_startup_all(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) 	if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) 		*ppos += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) ftrace_pid_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) 		 size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) 	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) 		    size_t cnt, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) 	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) ftrace_pid_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) 	struct trace_array *tr = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) 	trace_array_put(tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) 	return seq_release(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) static const struct file_operations ftrace_pid_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) 	.open		= ftrace_pid_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) 	.write		= ftrace_pid_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) 	.llseek		= tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) 	.release	= ftrace_pid_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) static const struct file_operations ftrace_no_pid_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) 	.open		= ftrace_no_pid_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) 	.write		= ftrace_no_pid_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) 	.read		= seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) 	.llseek		= tracing_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) 	.release	= ftrace_pid_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) 			    tr, &ftrace_pid_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) 	trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) 			    tr, &ftrace_no_pid_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) 					 struct dentry *d_tracer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) 	/* Only the top level directory has the dyn_tracefs and profile */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) 	ftrace_init_dyn_tracefs(d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) 	ftrace_profile_tracefs(d_tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)  * ftrace_kill - kill ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524)  * This function should be used by panic code. It stops ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525)  * but in a not so nice way. If you need to simply kill ftrace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526)  * from a non-atomic section, use ftrace_kill.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) void ftrace_kill(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) 	ftrace_disabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) 	ftrace_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) 	ftrace_trace_function = ftrace_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536)  * Test if ftrace is dead or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) int ftrace_is_dead(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) 	return ftrace_disabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544)  * register_ftrace_function - register a function for profiling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545)  * @ops - ops structure that holds the function for profiling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547)  * Register a function to be called by all functions in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548)  * kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550)  * Note: @ops->func and all the functions it calls must be labeled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551)  *       with "notrace", otherwise it will go into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552)  *       recursive loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) int register_ftrace_function(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) 	ftrace_ops_init(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) 	ret = ftrace_startup(ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) EXPORT_SYMBOL_GPL(register_ftrace_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571)  * unregister_ftrace_function - unregister a function for profiling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572)  * @ops - ops structure that holds the function to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574)  * Unregister a function that was added to be called by ftrace profiling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) int unregister_ftrace_function(struct ftrace_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) 	ret = ftrace_shutdown(ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) EXPORT_SYMBOL_GPL(unregister_ftrace_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) static bool is_permanent_ops_registered(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) 	struct ftrace_ops *op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) 	do_for_each_ftrace_op(op, ftrace_ops_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) 		if (op->flags & FTRACE_OPS_FL_PERMANENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) 	} while_for_each_ftrace_op(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601) ftrace_enable_sysctl(struct ctl_table *table, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) 		     void *buffer, size_t *lenp, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) 	mutex_lock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) 	if (unlikely(ftrace_disabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) 	if (ftrace_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) 		/* we are starting ftrace again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) 		if (rcu_dereference_protected(ftrace_ops_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) 			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) 			update_ftrace_function();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) 		ftrace_startup_sysctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) 		if (is_permanent_ops_registered()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) 			ftrace_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) 			ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) 		/* stopping ftrace calls (just send to ftrace_stub) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) 		ftrace_trace_function = ftrace_stub;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) 		ftrace_shutdown_sysctl();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) 	last_ftrace_enabled = !!ftrace_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) 	mutex_unlock(&ftrace_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) }