// SPDX-License-Identifier: GPL-2.0-only
/*
 * stacktrace.c : stacktracing APIs needed by the rest of the kernel
 * (wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach so that a single copy of the unwinding core is
 *   shared by all APIs needing unwinding; per-API iterator callbacks decide:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing at (wchan)
 *      = specifics of the data structs where the trace is saved
 *        (CONFIG_STACKTRACE etc.)
 *
 * vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 *
 * rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 * Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
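	/*
	 * Note: the unwinder's register file is indexed by DWARF register
	 * number: on ARC, r27 is fp, r28 is sp, r31 is blink (the return
	 * address) and r63 (PCL) stands in for the PC that the unwinder
	 * reads back via UNW_PC().
	 */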
	if (regs) {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	} else if (tsk == NULL || tsk == current) {
		/*
		 * synchronous unwinding (e.g. dump_stack)
		 *  - uses current values of SP and friends
		 */
		unsigned long fp, sp, blink, ret;

		frame_info->task = current;

		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it will be in __switch_to, kernel mode SP of task
		 *    is safe-kept and BLINK at a well known location in there
		 */

		if (tsk->state == TASK_RUNNING)
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, FP is first saved on the
		 * stack and then SP is copied to FP. Dwarf assumes the CFA
		 * is FP based, but we didn't save FP, so the value retrieved
		 * above is FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * __switch_to.
		 */
		frame_info->regs.r27 = 0;
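		/* The 60 byte fixup below likely accounts for __switch_to's
		 * frame: 13 callee-saved regs (r13-r25) plus fp and blink,
		 * 4 bytes each (an assumption; not derived from this file).
		 */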
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;
	}
	return 0;
}
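
/*
 * The (tsk, regs) combinations arc_unwind_core() below forwards here,
 * as used by this file's own APIs (a summary, not an exhaustive list):
 *
 *	(tsk, regs):  Oops/exception dump - show_stacktrace()
 *	(NULL, NULL): current context - save_stack_trace(), show_stack()
 *	(tsk, NULL):  sleeping task - save_stack_trace_tsk(), get_wchan()
 */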

#endif

notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

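		/* continue from the caller: its PC is the return address
		 * that arc_unwind() just restored into blink (r31)
		 */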
		frame_info.regs.r63 = frame_info.regs.r31;

		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting!\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the DWARF based unwinder works. FP based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is set up (callee regs saved first, then
	 * FP set, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}
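
/*
 * Example (an illustrative sketch, not code in this file): a private
 * consumer callback plugged into arc_unwind_core() to count the frames
 * of the current call chain. count_frames/depth are made-up names; the
 * callback contract (return 0 to keep going, -1 to stop) is the one
 * described below.
 *
 *	static int count_frames(unsigned int address, void *arg)
 *	{
 *		int *depth = arg;
 *
 *		(*depth)++;
 *		return 0;	// unwind all the way down
 *	}
 *
 *	int depth = 0;
 *	arc_unwind_core(NULL, NULL, count_frames, &depth);
 */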

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop; by default
 * it keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/Oops/BUG etc
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	printk("%s %pS\n", loglvl, (void *)address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into unwinding core to capture the
 * traces needed by kernel on /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}
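
/*
 * Note: returning -1 above stops the iterator, and arc_unwind_core()
 * then returns the last address it saw, i.e. the first non-scheduler
 * function, which is exactly what get_wchan() below reports.
 */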

/*-------------------------------------------------------------------------
 * APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	show_stacktrace(tsk, NULL, loglvl);
}

/* Another API expected by the scheduler; shows up in "ps" as Wait Channel.
 * Just returning schedule() would be pointless, so unwind until the current
 * function is no longer in scheduler code.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
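
/*
 * Example (an illustrative sketch, not code in this file): feeding a
 * struct stack_trace to save_stack_trace(). The array size 16 is an
 * arbitrary choice for the sketch; .skip drops that many innermost
 * frames, as handled in __collect_all() above.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *	// trace.nr_entries now holds the number of entries[] filled
 */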
#endif