/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

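/*
 * Walk the kernel stack and hand each return address to @consume_entry.
 *
 * If @regs is non-NULL, the walk starts from the interrupted context and
 * regs->ip is reported first; otherwise it starts from the current frame
 * of @task.  The walk stops when the unwinder finishes or when
 * @consume_entry returns false.
 */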
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}
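
/*
 * For illustration only (not part of this file): a minimal consume_entry
 * callback, in the spirit of stack_trace_consume_entry() in
 * kernel/stacktrace.c.  All names below are hypothetical.
 *
 *	struct example_cookie {
 *		unsigned long	*store;		// caller-provided buffer
 *		unsigned int	len;		// entries stored so far
 *		unsigned int	size;		// buffer capacity
 *	};
 *
 *	static bool example_consume_entry(void *cookie, unsigned long addr)
 *	{
 *		struct example_cookie *c = cookie;
 *
 *		if (c->len >= c->size)
 *			return false;		// buffer full: stop the walk
 *		c->store[c->len++] = addr;
 *		return true;			// keep walking
 *	}
 *
 * A caller would then do:
 *
 *	arch_stack_walk(example_consume_entry, &c, current, NULL);
 */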

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	return 0;
}
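
/*
 * Illustrative only: this function is normally reached through the generic
 * stack_trace_save_tsk_reliable() helper in kernel/stacktrace.c.  A
 * hypothetical caller (e.g. live patching deciding whether a task can be
 * safely transitioned) would look like:
 *
 *	ret = stack_trace_save_tsk_reliable(task, entries, max_entries);
 *	if (ret < 0)
 *		... the trace cannot be trusted, fall back or retry ...
 *	else
 *		... ret reliable entries were stored in 'entries' ...
 */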

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

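/*
 * Mirrors the layout of a frame-pointer-based user stack frame: the saved
 * frame pointer of the caller, followed by the return address pushed by
 * the call instruction.  Walking user stacks this way only works for
 * binaries built without -fomit-frame-pointer.
 */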
struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

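/*
 * Copy one stack_frame_user from user space without sleeping.  Returns 1
 * on success and 0 if the pointer lies outside the user address range or
 * the page is not present: with page faults disabled, __get_user() fails
 * instead of faulting the page in, which keeps this safe to call from
 * non-preemptible and interrupt context.
 */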
static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}

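/*
 * Walk the user stack of the interrupted context by chasing the frame
 * pointer chain, reporting regs->ip first and then each saved return
 * address.  The walk stops on a copy failure, a frame pointer that moves
 * below regs->sp (frames must grow toward higher addresses), a zero
 * return address, or when @consume_entry returns false.  This is the
 * arch backend behind stack_trace_save_user().
 */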
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr))
			break;
		fp = frame.next_fp;
	}
}