// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
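/*
 * Stack tracer: record the deepest kernel stack usage seen since boot (or
 * since the recorded maximum was last reset) along with the functions that
 * were on the stack at that point.
 *
 * Typical usage from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/tracing/stack_max_size
 *   cat /sys/kernel/tracing/stack_trace
 *
 * The tracer can also be enabled at boot with the "stacktrace" kernel
 * command line parameter; see enable_stacktrace() and stack_trace_init()
 * below.
 */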
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

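/*
 * Dump the currently recorded maximum stack trace to the console with
 * pr_emerg(). Only used by check_stack() below, when the end of the task's
 * stack is found to be corrupted (e.g. by an overflow) just before BUG().
 */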
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * The stack tracer checks, at each traced function call, whether a new
 * maximum stack usage has been reached. It registers a callback with ftrace,
 * and in that callback it examines the stack size. It determines the stack
 * size from the variable passed in, which is the address of a local variable
 * in the stack_trace_call() callback function. The stack size is calculated
 * as the distance from the address of that local variable to the top of the
 * current stack. If that size is smaller than the currently saved max stack
 * size, nothing more is done.
 *
 * If the size of the stack is greater than the maximum recorded size, then
 * the following algorithm takes place.
 *
 * For architectures (like x86) that store the function's return address
 * before saving the function's local variables, the stack will look something
 * like this:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: return addr to entry code
 *   11: start of sys_foo frame
 *   20: return addr to sys_foo
 *   21: start of kernel_func_bar frame
 *   30: return addr to kernel_func_bar
 *   31: [ do trace stack here ]
 *
 * stack_trace_save() is called, returning all the functions it finds in the
 * current stack, which would be (from the bottom of the stack to the top):
 *
 *   return addr to kernel_func_bar
 *   return addr to sys_foo
 *   return addr to entry code
 *
 * Now, to figure out how much stack each of these functions uses, the stack
 * is searched for these return-address values. When a match is found, the
 * address is recorded in the stack_dump_trace[] array and its offset into the
 * stack is saved in the stack_trace_index[] array. The above example would
 * show:
 *
 *          stack_dump_trace[]       |   stack_trace_index[]
 *          ------------------       +   -------------------
 *   return addr to kernel_func_bar  |          30
 *   return addr to sys_foo          |          20
 *   return addr to entry            |          10
 *
 * The print_max_stack() function above uses these values to print the size of
 * each function's portion of the stack.
 *
 *   for (i = 0; i < nr_entries; i++) {
 *           size = i == nr_entries - 1 ? stack_trace_index[i] :
 *                           stack_trace_index[i] - stack_trace_index[i+1];
 *           print("%d %d %d %s\n", i, stack_trace_index[i], size,
 *                 stack_dump_trace[i]);
 *   }
 *
 * The above shows
 *
 *     depth size location
 *     ----- ---- --------
 *  0     30   10 kernel_func_bar
 *  1     20   10 sys_foo
 *  2     10   10 entry code
 *
 * Now, for architectures that save the return address after the function's
 * local variables (saving the link register before calling nested functions),
 * the stack will look a little different:
 *
 *   [ top of stack ]
 *    0: sys call entry frame
 *   10: start of sys_foo frame
 *   19: return addr to entry code << lr saved before calling kernel_func_bar
 *   20: start of kernel_func_bar frame
 *   29: return addr to sys_foo << lr saved before calling next function
 *   30: [ do trace stack here ]
 *
 * Although the functions returned by stack_trace_save() may be the same, the
 * placement in the stack will be different. Using the same algorithm as above
 * would yield:
 *
 *          stack_dump_trace[]       |   stack_trace_index[]
 *          ------------------       +   -------------------
 *   return addr to kernel_func_bar  |          30
 *   return addr to sys_foo          |          29
 *   return addr to entry            |          19
 *
 * Here the mapping is off by one:
 *
 *   kernel_func_bar stack frame size is 29 - 19, not 30 - 29!
 *
 * To fix this, if the architecture defines ARCH_FTRACE_SHIFT_STACK_TRACER
 * (see the #ifdef in check_stack() below), the values in stack_trace_index[]
 * are shifted by one and the number of stack trace entries is decremented by
 * one.
 *
 *          stack_dump_trace[]       |   stack_trace_index[]
 *          ------------------       +   -------------------
 *   return addr to kernel_func_bar  |          29
 *   return addr to sys_foo          |          19
 *
 * Although the entry function is not displayed, the size reported for the
 * first function (sys_foo) still includes the entry function's stack usage.
 */
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
						  ARRAY_SIZE(stack_dump_trace) - 1,
						  0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may,
	 * for some reason, be missing from the stack, so we may
	 * have to account for that. If they are all there, the
	 * stack only needs to be walked once. This code only runs
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

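/*
 * The ftrace callback, invoked at (nearly) every traced function entry. It
 * uses the address of its own local variable as an approximation of the
 * current stack pointer and hands it to check_stack(), after adjusting the
 * traced ip past the mcount/fentry call site. The per-CPU
 * disable_stack_tracer counter prevents recursion.
 */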
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed; only this CPU modifies this per-CPU variable */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If RCU is not watching, then saving a stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

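/* tracefs "stack_max_size" read: report the largest stack usage recorded */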
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

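/*
 * tracefs "stack_max_size" write: let user space seed or reset the recorded
 * maximum (typically by writing 0). The value is updated under
 * stack_trace_max_lock, with the stack tracer disabled on this CPU so the
 * callback cannot deadlock on the same lock.
 */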
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If the stack tracer callback were to run while we hold
	 * stack_trace_max_lock (tracing inside arch_spin_lock(), or from
	 * an NMI after it), it would deadlock on the same lock, so also
	 * bump the per-CPU disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

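/*
 * seq_file iteration for the "stack_trace" file. Position 0 is the header
 * (SEQ_START_TOKEN); positions 1..N map to entries 0..N-1 of
 * stack_dump_trace[].
 */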
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

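/*
 * Take stack_trace_max_lock (with IRQs off and the stack tracer disabled on
 * this CPU) so the saved trace cannot change while it is being printed.
 * The lock is released again in t_stop().
 */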
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

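/* Counterpart of t_start(): drop the lock, re-enable tracing and IRQs */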
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

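/*
 * Print one entry of the "stack_trace" file. The size of each function's
 * frame is the difference between its stack offset and the next entry's
 * offset; the last entry simply reports its own offset.
 */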
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

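/*
 * tracefs "stack_trace_filter": standard ftrace filter interface, used to
 * limit which functions the stack tracer callback is attached to.
 */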
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

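/*
 * Handler for the kernel.stack_tracer_enabled sysctl. When the value actually
 * changes, register or unregister the ftrace callback accordingly.
 */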
int
stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

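/*
 * Handle the "stacktrace" kernel command line parameter. An optional
 * "stacktrace_filter=" list is stashed here and applied later, in
 * stack_trace_init().
 */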
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

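/*
 * Create the tracefs control files and, if requested on the kernel command
 * line, apply the early filter and start the tracer.
 */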
static __init int stack_trace_init(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("stack_max_size", 0644, NULL,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, NULL,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, NULL,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);