// SPDX-License-Identifier: GPL-2.0

#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
#include <linux/ptrace.h>

#ifdef CONFIG_FRAME_POINTER

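/*
 * Stack frame record as the unwinder reads it through the frame pointer:
 * the saved frame pointer followed by the saved return address.
 */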
struct stackframe {
	unsigned long fp;
	unsigned long ra;
};

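/*
 * Walk the frame-pointer chain, calling fn() for each return address found.
 * The starting point is taken from @regs if given, from the live registers
 * for the current task, or from the context saved at __switch_to for a
 * blocked task.  Walking stops when fn() returns true, when the PC leaves
 * kernel text, or when the frame pointer falls outside the current stack.
 */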
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
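		/* Unwinding ourselves: read sp and the frame pointer (r8) directly. */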
		const register unsigned long current_sp __asm__ ("sp");
		const register unsigned long current_fp __asm__ ("r8");
		fp = current_fp;
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = thread_saved_fp(task);
		sp = thread_saved_sp(task);
		pc = thread_saved_lr(task);
	}

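	/* Report each frame until fn() asks to stop or the chain ends. */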
	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer */
		low = sp;
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x3))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp;
		sp = fp;
		fp = frame->fp;
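		/*
		 * Use the real return address if the function graph tracer
		 * has replaced the one saved in this frame.
		 */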
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}

#else /* !CONFIG_FRAME_POINTER */

static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = thread_saved_sp(task);
		pc = thread_saved_lr(task);
	}

	if (unlikely(sp & 0x3))
		return;

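	/*
	 * No frame pointers: scan the stack word by word and report every
	 * value that looks like a kernel text address.
	 */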
	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
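		/* Step back from the saved return address toward the call site. */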
		pc = (*ksp++) - 0x4;
	}
}
#endif /* CONFIG_FRAME_POINTER */

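/* show_stack() callback: print every address and never stop the walk early. */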
static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym((const char *)arg, pc);
	return false;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}

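/*
 * get_wchan() callback: record the first PC outside the scheduler and
 * stop the walk.
 */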
static bool save_wchan(unsigned long pc, void *arg)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return true;
	}
	return false;
}

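/*
 * Return the address a sleeping task is blocked at: the first PC on its
 * stack that is not a scheduler function, or 0 if none is found.
 */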
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}

#ifdef CONFIG_STACKTRACE
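/*
 * stack_trace callback: honour the requested skip count, optionally drop
 * scheduler functions, and stop once the entry buffer is full.
 */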
static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
	struct stack_trace *trace = arg;

	if (unlikely(nosched && in_sched_functions(pc)))
		return false;
	if (unlikely(trace->skip > 0)) {
		trace->skip--;
		return false;
	}

	trace->entries[trace->nr_entries++] = pc;
	return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(unsigned long pc, void *arg)
{
	return __save_trace(pc, arg, false);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */