^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2012 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #ifndef __ASM_STACKTRACE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #define __ASM_STACKTRACE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/percpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/sdei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/*
 * The kinds of stack an arm64 unwinder can walk across. NOTE: the
 * enumerator values are used as bit indices into the stacks_done bitmap
 * in struct stackframe (sized by __NR_STACK_TYPES), so do not reorder.
 */
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES	/* count of types above; sizes the bitmap */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/*
 * Bounds and type of a single stack, as filled in by on_stack() via the
 * on_*_stack() helpers below.
 */
struct stack_info {
	unsigned long low;	/* inclusive lower bound (see on_stack()) */
	unsigned long high;	/* exclusive upper bound (see on_stack()) */
	enum stack_type type;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp: The fp value in the frame record (or the real fp)
 * @pc: The pc value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, for which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp: The fp that pointed to this frame record, or a synthetic value
 *           of 0. This is used to ensure that within a stack, each
 *           subsequent frame record is at an increasing address.
 * @prev_type: The type of stack this frame record was on, or a synthetic
 *             value of STACK_TYPE_UNKNOWN. This is used to detect a
 *             transition from one stack to another.
 *
 * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *         replacement lr value in the ftrace graph stack.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
/* Advance *frame one step towards the caller; implementation elsewhere. */
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
/*
 * Walk frames starting at *frame, invoking fn(data, pc) per frame.
 * NOTE(review): fn's bool return presumably terminates the walk when
 * false — confirm against the definition in stacktrace.c.
 */
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

/* Per-cpu base pointer of the IRQ stack; consumed by on_irq_stack(). */
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) static inline bool on_stack(unsigned long sp, unsigned long low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) unsigned long high, enum stack_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct stack_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (!low)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (sp < low || sp >= high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) if (info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) info->low = low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) info->high = high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) info->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static inline bool on_irq_stack(unsigned long sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) struct stack_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) unsigned long high = low + IRQ_STACK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) static inline bool on_task_stack(const struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) unsigned long sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) struct stack_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) unsigned long low = (unsigned long)task_stack_page(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) unsigned long high = low + THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return on_stack(sp, low, high, STACK_TYPE_TASK, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #ifdef CONFIG_VMAP_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) static inline bool on_overflow_stack(unsigned long sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct stack_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) unsigned long high = low + OVERFLOW_STACK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) static inline bool on_overflow_stack(unsigned long sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct stack_info *info) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * We can only safely access per-cpu stacks from current in a non-preemptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) static inline bool on_accessible_stack(const struct task_struct *tsk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) unsigned long sp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) struct stack_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if (info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) info->type = STACK_TYPE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (on_task_stack(tsk, sp, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (tsk != current || preemptible())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (on_irq_stack(sp, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) if (on_overflow_stack(sp, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) if (on_sdei_stack(sp, info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static inline void start_backtrace(struct stackframe *frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) unsigned long fp, unsigned long pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) frame->fp = fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) frame->pc = pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) frame->graph = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * Prime the first unwind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * In unwind_frame() we'll check that the FP points to a valid stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * treated as a transition to whichever stack that happens to be. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * prev_fp value won't be used, but we set it to 0 such that it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * definitely not an accessible stack address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) frame->prev_fp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) frame->prev_type = STACK_TYPE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) #endif /* __ASM_STACKTRACE_H */