Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards. The listing below is kernel/trace/fgraph.c, the function graph tracer infrastructure.

// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from the function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static bool kill_ftrace_graph;
int ftrace_graph_active;

/* Enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* Add a function return address to the shadow return stack of the current task. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
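
/*
 * Layout note (editor's sketch, not from the original source): the shadow
 * stack filled in above lives in current->ret_stack, one ftrace_ret_stack
 * entry per live traced call:
 *
 *	index curr_ret_stack -> { ret, func, calltime[, fp][, retp] }  deepest call
 *	  ...
 *	index 0              -> { ret, func, calltime[, fp][, retp] }  first traced call
 *
 * fp and retp are only recorded when the architecture defines
 * HAVE_FUNCTION_GRAPH_FP_TEST / HAVE_FUNCTION_GRAPH_RET_ADDR_PTR.
 */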

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	/*
	 * Skip graph tracing if the return location is served by a direct
	 * trampoline, since the call sequence and return addresses are no
	 * longer predictable. For example, a BPF trampoline may call the
	 * original function and may skip a frame depending on the type of
	 * BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;
	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
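
/*
 * Usage sketch (editor's illustration; the exact helper name and argument
 * order are architecture specific and not defined in this file):
 *
 *	// arch mcount/fentry stub, run on function entry:
 *	void prepare_ftrace_return(unsigned long *parent, unsigned long self,
 *				   unsigned long frame_pointer)
 *	{
 *		if (!function_graph_enter(*parent, self, frame_pointer, parent))
 *			*parent = (unsigned long)&return_to_handler;
 *	}
 *
 * return_to_handler is an assembly trampoline that later calls
 * ftrace_return_to_handler() below and jumps to the address it returns.
 */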

/* Retrieve a function return address from the shadow return stack of the current task. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend to/restore from disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() callback may still access the current
	 * ret_stack structure, so make sure the update of curr_ret_stack
	 * happens only after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_get_ret_stack - return an entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry of @task at position @idx on the shadow
 * stack of saved return addresses, counting from the most recent call.
 * If @idx is zero, the last saved entry is returned; greater values
 * walk back toward earlier saved return addresses. Returns NULL if
 * @idx is out of range.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
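
/*
 * Example use (editor's sketch, not part of the original file): walk every
 * return address a task currently has redirected by the graph tracer, most
 * recent first. "task" is assumed to be a valid struct task_struct pointer.
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("call to %ps will return to %lx\n",
 *			(void *)ret_stack->func, ret_stack->ret);
 */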

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
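
/*
 * Unwinder usage sketch (editor's illustration, matching the kernel-doc
 * above; frame_pc and frame_pc_loc stand for values an arch stack walker
 * already has in hand):
 *
 *	int graph_idx = 0;	// initialised once per stack walk
 *
 *	// then, for every frame found while unwinding:
 *	frame_pc = ftrace_graph_ret_addr(task, &graph_idx, frame_pc,
 *					 frame_pc_loc);
 *	// frame_pc is now the original caller even if the on-stack value
 *	// had been rewritten to return_to_handler by the graph tracer.
 */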

/*
 * The ftrace_ops that the graph tracer registers with the function tracer
 * core (comment added for clarity). .func is only a stub: the real entry
 * work is done by the arch graph trampoline. The ops mainly supplies the
 * filter hash, which is shared with global_ops.
 */
static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/*
 * Try to assign a return stack to each task that lacks one, in batches of
 * FTRACE_RETSTACK_ALLOC_SIZE; returns -EAGAIN while more tasks remain.
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping. E.g. a call that ran 2us, slept
	 * 10us and then ran 1us will be reported as taking 3us.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, rather than just tracing any
 * function that any ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent; it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
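
/*
 * Registration sketch (editor's illustration, not part of this file): a
 * minimal in-kernel client supplying its own entry/return callbacks. The
 * my_* names are made up for the example.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// non-zero: trace this call
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		pr_info("%ps took %llu ns\n", (void *)trace->func,
 *			trace->rettime - trace->calltime);
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	// register_ftrace_graph(&my_gops);
 *	// ...
 *	// unregister_ftrace_graph(&my_gops);
 */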

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}