/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

static struct pt_regs exec_summary_regs;

bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
                           struct stack_info *info)
{
        unsigned long *begin = task_stack_page(task);
        unsigned long *end = task_stack_page(task) + THREAD_SIZE;

        if (stack < begin || stack >= end)
                return false;

        info->type = STACK_TYPE_TASK;
        info->begin = begin;
        info->end = end;
        info->next_sp = NULL;

        return true;
}
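
/*
 * Worked example (illustrative, not part of the kernel): with the usual
 * x86-64 THREAD_SIZE of 16KB and a task stack based at the made-up address
 * 0xffffc90000004000, in_task_stack() accepts any pointer in
 * [0xffffc90000004000, 0xffffc90000008000):
 *
 *	struct stack_info info;
 *	unsigned long *sp = (unsigned long *)0xffffc90000004ff8;
 *
 *	if (in_task_stack(sp, current, &info))
 *		;	// info.begin/info.end now bound the task stack
 */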

/* Called from get_stack_info_noinstr - so must be noinstr too */
bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
{
        struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

        void *begin = ss;
        void *end = ss + 1;

        if ((void *)stack < begin || (void *)stack >= end)
                return false;

        info->type = STACK_TYPE_ENTRY;
        info->begin = begin;
        info->end = end;
        info->next_sp = NULL;

        return true;
}

static void printk_stack_address(unsigned long address, int reliable,
                                 const char *log_lvl)
{
        touch_nmi_watchdog();
        printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
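
/*
 * Illustrative output (symbols and offsets are made up): %pB resolves the
 * address to symbol+offset/size, and only addresses the unwinder did not
 * confirm get the "? " prefix:
 *
 *	 example_reliable_func+0x1a/0x40
 *	 ? example_stale_func+0x10/0x30
 */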

static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
                     unsigned int nbytes)
{
        if (!user_mode(regs))
                return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

        /* The user space code from other tasks cannot be accessed. */
        if (regs != task_pt_regs(current))
                return -EPERM;
        /*
         * Make sure userspace isn't trying to trick us into dumping kernel
         * memory by pointing the userspace instruction pointer at it.
         */
        if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
                return -EINVAL;

        /*
         * Despite its name, copy_from_user_nmi() can be invoked from
         * contexts other than NMI; it never tries to resolve a page
         * fault, which is the correct behavior here since this code can
         * be called from any context.
         */
        return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}
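
/*
 * Usage sketch (hypothetical caller): fetch the bytes around the faulting
 * instruction, tolerating the -EPERM case for other tasks' user space:
 *
 *	u8 buf[16];
 *
 *	if (copy_code(regs, buf, regs->ip - 8, sizeof(buf)) == 0)
 *		print_hex_dump_bytes("insn: ", DUMP_PREFIX_OFFSET,
 *				     buf, sizeof(buf));
 */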

/*
 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
 *
 * In the case where we don't have the exact kernel image (if we did, we
 * could simply disassemble it and navigate to the RIP), the purpose of the
 * bigger prologue is to have more context and to be able to correlate the
 * code from the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the
 * failing kernel, and thus in making sense of the register dump.
 *
 * What is more, the additional complication of a variable-length insn arch
 * like x86 warrants having a longer byte sequence before rIP so that the
 * disassembler can "sync" up properly and find instruction boundaries when
 * decoding the opcode bytes.
 *
 * Thus, the 2/3rds prologue and the 64-byte OPCODE_BUFSIZE are just a
 * guesstimate in an attempt to achieve all of the above.
 */
void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
        u8 opcodes[OPCODE_BUFSIZE];
        unsigned long prologue = regs->ip - PROLOGUE_SIZE;

        switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
        case 0:
                printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
                       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
                       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
                break;
        case -EPERM:
                /* No access to the user space stack of other tasks. Ignore. */
                break;
        default:
                printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
                       loglvl, prologue);
                break;
        }
}
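
/*
 * Illustrative "Code:" line (bytes are made up and abbreviated): 42
 * prologue bytes, the byte at regs->ip in <angle brackets>, then 21
 * epilogue bytes:
 *
 *	Code: 48 89 e5 41 54 ... 0f 1f 00 <0f> 0b 48 83 c4 08 ... c3
 */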

void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
        printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
        printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
        show_opcodes(regs, loglvl);
}

void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
        show_ip(regs, log_lvl);
        printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
                regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
                                  bool partial, const char *log_lvl)
{
        /*
         * These on_stack() checks aren't strictly necessary: the unwind code
         * has already validated the 'regs' pointer.  The checks are done for
         * ordering reasons: if the registers are on the next stack, we don't
         * want to print them out yet.  Otherwise they'll be shown as part of
         * the wrong stack.  Later, when show_trace_log_lvl() switches to the
         * next stack, this function will be called again with the same regs so
         * they can be printed in the right context.
         */
        if (!partial && on_stack(info, regs, sizeof(*regs))) {
                __show_regs(regs, SHOW_REGS_SHORT, log_lvl);

        } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                       IRET_FRAME_SIZE)) {
                /*
                 * When an interrupt or exception occurs in entry code, the
                 * full pt_regs might not have been saved yet.  In that case
                 * just print the iret frame.
                 */
                show_iret_regs(regs, log_lvl);
        }
}

void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                        unsigned long *stack, const char *log_lvl)
{
        struct unwind_state state;
        struct stack_info stack_info = {0};
        unsigned long visit_mask = 0;
        int graph_idx = 0;
        bool partial = false;

        printk("%sCall Trace:\n", log_lvl);

        unwind_start(&state, task, regs, stack);
        stack = stack ? : get_stack_pointer(task, regs);
        regs = unwind_get_entry_regs(&state, &partial);

        /*
         * Iterate through the stacks, starting with the current stack pointer.
         * Each stack has a pointer to the next one.
         *
         * x86-64 can have several stacks:
         * - task stack
         * - interrupt stack
         * - HW exception stacks (double fault, nmi, debug, mce)
         * - entry stack
         *
         * x86-32 can have up to four stacks:
         * - task stack
         * - softirq stack
         * - hardirq stack
         * - entry stack
         */
        for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;

                if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
                        /*
                         * We weren't on a valid stack.  It's possible that
                         * we overflowed a valid stack into a guard page.
                         * See if the next page up is valid so that we can
                         * generate some kind of backtrace if this happens.
                         */
                        stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
                        if (get_stack_info(stack, task, &stack_info, &visit_mask))
                                break;
                }

                stack_name = stack_type_name(stack_info.type);
                if (stack_name)
                        printk("%s <%s>\n", log_lvl, stack_name);

                if (regs)
                        show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);

                /*
                 * Scan the stack, printing any text addresses we find.  At the
                 * same time, follow proper stack frames with the unwinder.
                 *
                 * Addresses found during the scan which are not reported by
                 * the unwinder are considered to be additional clues which are
                 * sometimes useful for debugging and are prefixed with '?'.
                 * This also serves as a failsafe option in case the unwinder
                 * goes off in the weeds.
                 */
                for (; stack < stack_info.end; stack++) {
                        unsigned long real_addr;
                        int reliable = 0;
                        unsigned long addr = READ_ONCE_NOCHECK(*stack);
                        unsigned long *ret_addr_p =
                                unwind_get_return_address_ptr(&state);

                        if (!__kernel_text_address(addr))
                                continue;

                        /*
                         * Don't print regs->ip again if it was already printed
                         * by show_regs_if_on_stack().
                         */
                        if (regs && stack == &regs->ip)
                                goto next;

                        if (stack == ret_addr_p)
                                reliable = 1;

                        /*
                         * When function graph tracing is enabled for a
                         * function, its return address on the stack is
                         * replaced with the address of an ftrace handler
                         * (return_to_handler).  In that case, before printing
                         * the "real" address, we want to print the handler
                         * address as an "unreliable" hint that function graph
                         * tracing was involved.
                         */
                        real_addr = ftrace_graph_ret_addr(task, &graph_idx,
                                                          addr, stack);
                        if (real_addr != addr)
                                printk_stack_address(addr, 0, log_lvl);
                        printk_stack_address(real_addr, reliable, log_lvl);

                        if (!reliable)
                                continue;

next:
                        /*
                         * Get the next frame from the unwinder.  No need to
                         * check for an error: if anything goes wrong, the rest
                         * of the addresses will just be printed as unreliable.
                         */
                        unwind_next_frame(&state);

                        /* if the frame has entry regs, print them */
                        regs = unwind_get_entry_regs(&state, &partial);
                        if (regs)
                                show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
                }

                if (stack_name)
                        printk("%s </%s>\n", log_lvl, stack_name);
        }
}
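
/*
 * Illustrative result (symbols and offsets are made up): a trace that
 * crossed from a named exception stack back onto the task stack could
 * look like this, with '?' marking scan hits the unwinder did not confirm:
 *
 *	Call Trace:
 *	 <NMI>
 *	 example_nmi_handler+0x23/0x80
 *	 </NMI>
 *	 example_driver_poll+0x45/0xb0
 *	 ? example_stale_entry+0x10/0x20
 *	 example_syscall+0x32/0x70
 */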

void show_stack(struct task_struct *task, unsigned long *sp,
                const char *loglvl)
{
        task = task ? : current;

        /*
         * Stack frames below this one aren't interesting.  Don't show them
         * if we're printing for %current.
         */
        if (!sp && task == current)
                sp = get_stack_pointer(current, NULL);

        show_trace_log_lvl(task, NULL, sp, loglvl);
}
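
/*
 * Illustrative usage (hypothetical caller): passing NULL for both task and
 * sp dumps the current task starting from the current stack pointer:
 *
 *	show_stack(NULL, NULL, KERN_DEBUG);
 */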

void show_stack_regs(struct pt_regs *regs)
{
        show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}
NOKPROBE_SYMBOL(oops_begin);
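
/*
 * Typical pairing, mirroring die() below (sketch only):
 *
 *	unsigned long flags = oops_begin();
 *
 *	__die("Oops", regs, error_code);	// print the report
 *	oops_end(flags, regs, SIGSEGV);		// may panic or kill the task
 */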

void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        /* Executive summary in case the oops scrolled away */
        __show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);

        if (!signr)
                return;
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        /*
         * We're not going to return, but we might be on an IST stack or
         * have very little stack space left.  Rewind the stack and kill
         * the task.
         * Before we rewind the stack, we have to tell KASAN that we're going to
         * reuse the task stack and that existing poisons are invalid.
         */
        kasan_unpoison_task_stack(current);
        rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static void __die_header(const char *str, struct pt_regs *regs, long err)
{
        const char *pr = "";

        /* Save the regs of the first oops for the executive summary later. */
        if (!die_counter)
                exec_summary_regs = *regs;

        if (IS_ENABLED(CONFIG_PREEMPTION))
                pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

        printk(KERN_DEFAULT
               "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
               pr,
               IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
               debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
               IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
               IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
               (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);
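
/*
 * Illustrative header (values are made up): the first oops with error code
 * 0x2 on a preemptible SMP kernel with PTI enabled would print:
 *
 *	Oops: 0002 [#1] PREEMPT SMP PTI
 */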

static int __die_body(const char *str, struct pt_regs *regs, long err)
{
        show_regs(regs);
        print_modules();

        if (notify_die(DIE_OOPS, str, regs, err,
                       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
                return 1;

        return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
        __die_header(str, regs, err);
        return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This path is taken when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (__die(str, regs, err))
                sig = 0;
        oops_end(flags, regs, sig);
}
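
/*
 * Illustrative call site (hypothetical): a trap handler that has run out
 * of recovery options hands off to die(); with a fatal signal the call
 * usually does not return to the faulting context:
 *
 *	die("Oops", regs, error_code);
 */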

void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        __die_header(str, regs, err);
        if (gp_addr)
                kasan_non_canonical_hook(gp_addr);
        if (__die_body(str, regs, err))
                sig = 0;
        oops_end(flags, regs, sig);
}

void show_regs(struct pt_regs *regs)
{
        enum show_regs_mode print_kernel_regs;

        show_regs_print_info(KERN_DEFAULT);

        print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
        __show_regs(regs, print_kernel_regs, KERN_DEFAULT);

        /*
         * When in-kernel, we also print out the stack at the time of the fault.
         */
        if (!user_mode(regs))
                show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}