// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * 'traps.c' handles hardware exceptions after we have saved some state in
 * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
 * kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

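/*
 * Names for the exception classes, indexed by the 'reason' code that
 * the entry stubs pass to bad_mode(); the order must stay in sync with
 * the vector handling in entry-armv.S.
 */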
static const char *handler[] = {
        "prefetch abort",
        "data abort",
        "address exception",
        "interrupt",
        "undefined instruction",
};

void *vectors_page;

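/*
 * user_debug selects which classes of user-space fault are reported
 * verbosely; it is a bitmask of the UDBG_* flags tested below and is
 * set at boot via the "user_debug=" command line option (for example,
 * "user_debug=31" would typically enable every class).
 */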
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from,
                          unsigned long frame, const char *loglvl)
{
        unsigned long end = frame + 4 + sizeof(struct pt_regs);

#ifdef CONFIG_KALLSYMS
        printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
               loglvl, where, (void *)where, from, (void *)from);
#else
        printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
               loglvl, where, from);
#endif

        if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
                dump_mem(loglvl, "Exception stack", frame + 4, end);
}

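/*
 * Dump the registers saved by a kernel stm instruction: the low bits
 * of 'instruction' form a register bitmask (bit n set means rn was
 * stored), which we walk from r10 down to r0, popping one word off
 * 'stack' per set bit and printing six registers per line.
 */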
void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
        char str[80], *p;
        unsigned int x;
        int reg;

        for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
                if (instruction & BIT(reg)) {
                        p += sprintf(p, " r%d:%08x", reg, *stack--);
                        if (++x == 6) {
                                x = 0;
                                p = str;
                                printk("%s%s\n", loglvl, str);
                        }
                }
        }
        if (p != str)
                printk("%s%s\n", loglvl, str);
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

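/*
 * Print the instruction stream around the faulting PC: the four units
 * before it, the one at the PC (in parentheses), plus one more after
 * it in Thumb mode.  A unit is 16 bits (4 hex digits) in Thumb state
 * and 32 bits (8 hex digits) in ARM state.
 */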
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * Note that we now dump the code first, just in case the backtrace
         * kills us.
         */

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = get_user(val, &((u16 *)addr)[i]);
                else
                        bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                     width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        mm_segment_t fs;

        if (!user_mode(regs)) {
                fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                                  const char *loglvl)
{
        unwind_backtrace(regs, tsk, loglvl);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                           const char *loglvl)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("%sBacktrace: ", loglvl);

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = frame_pointer(regs);
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                pr_cont("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                pr_cont("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                pr_cont("frame pointer underflow");
        pr_cont("\n");

        if (ok)
                c_backtrace(fp, mode, loglvl);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk, KERN_EMERG);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

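/*
 * Serialize oops reports from multiple CPUs without deadlocking on
 * recursion: a CPU that oopses while already holding die_lock (it is
 * recorded in die_owner) carries on without reacquiring it, while any
 * other CPU spins until the first report is complete.  die_nest_count
 * lets oops_end() release the lock only once the outermost oops has
 * finished.
 */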
static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                    int signo, int si_code, void __user *addr,
                    unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_fault(signo, si_code, addr);
        } else {
                die(str, regs, err);
        }
}

#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (get_kernel_nofault(bkpt, (void *)pc))
                return 0;

        return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

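/*
 * Undefined-instruction hooks allow other code (kgdb, SWP emulation,
 * the TLS trap below) to claim particular undef encodings before we
 * fall back to SIGILL.  A hook matches when (instr & instr_mask) ==
 * instr_val and (cpsr & cpsr_mask) == cpsr_val; its ->fn returns 0 if
 * it handled the instruction.  arm_mrc_hook below is an in-file
 * example of a hook definition.
 */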
void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static nokprobe_inline
int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

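/*
 * Entry point from the undef vector: fetch the faulting instruction in
 * whatever encoding was executing (ARM, Thumb, or a 32-bit Thumb-2
 * pair), offer it to the registered hooks for emulation, and deliver
 * SIGILL if nothing claims it.
 */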
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                pr_info("%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
#endif
        arm_notify_die("Oops - undefined instruction", regs,
                       SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in driver
 * or platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        nmi_enter();

        /* nop. FIQ handlers for special arch/arm features can be added here. */

        nmi_exit();

        set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
        if ((current->personality & PER_MASK) != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: obsolete system call %08x.\n",
                       task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
                       (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4),
                       n, 0);

        return regs->ARM_r0;
}

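/*
 * Flush the I-cache over a user range in page-sized chunks so a huge
 * range cannot hog the CPU: bail out early if a fatal signal is
 * pending and offer to reschedule between chunks.
 */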
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (fatal_signal_pending(current))
                        return 0;

                ret = flush_icache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok((void __user *)start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
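/*
 * NR() maps an __ARM_NR_* private call number onto the small index
 * switched on below; __ARM_NR_cacheflush, for instance, is defined as
 * __ARM_NR_BASE + 2, so NR(cacheflush) is 2.
 */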
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        if ((no >> 16) != (__ARM_NR_BASE >> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                arm_notify_die("branch through zero", regs,
                               SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                set_tls(regs->ARM_r0);
                return 0;

        case NR(get_tls):
                return current_thread_info()->tp_value[0];

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr(KERN_ERR, regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
                }
        }
#endif
        arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
                       (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4),
                       no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

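/*
 * Emulate a userspace read of the TLS register ("mrc p15, 0, rN, c13,
 * c0, 3", the encoding matched by arm_mrc_hook below): write the
 * thread's saved tp_value into the destination register and step the
 * PC over the 4-byte instruction.  rN == pc is left to the normal
 * undef path.
 */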
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                pr_err("8<--- cut here ---\n");
                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                       task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(KERN_ERR, current->mm, addr);
        }
#endif

        arm_notify_die("unknown data abort code", regs,
                       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        pr_err("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        pr_err("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}

void __init trap_init(void)
{
        return;
}

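/*
 * The kuser helpers are tiny user-callable routines (cmpxchg, memory
 * barrier, TLS read) that userspace invokes at fixed addresses at the
 * top of the vectors page, just below 0xffff1000; kuser_init() copies
 * them into place so those addresses stay ABI-stable.
 */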
#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

#ifndef CONFIG_CPU_V7M
static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
{
        memcpy(vma, lma_start, lma_end - lma_start);
}

static void flush_vectors(void *vma, size_t offset, size_t size)
{
        unsigned long start = (unsigned long)vma + offset;
        unsigned long end = start + size;

        flush_icache_range(start, end);
}

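/*
 * Spectre BHB mitigation: swap the live vector stubs for a variant
 * that scrubs the branch history on exception entry, using either the
 * 8-iteration loop workaround or BPIALL.  This is only safe early in
 * boot, before scheduling starts; later calls report the CPU as still
 * vulnerable.
 */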
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
int spectre_bhb_update_vectors(unsigned int method)
{
        extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
        extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
        void *vec_start, *vec_end;

        if (system_state > SYSTEM_SCHEDULING) {
                pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
                       smp_processor_id());
                return SPECTRE_VULNERABLE;
        }

        switch (method) {
        case SPECTRE_V2_METHOD_LOOP8:
                vec_start = __vectors_bhb_loop8_start;
                vec_end = __vectors_bhb_loop8_end;
                break;

        case SPECTRE_V2_METHOD_BPIALL:
                vec_start = __vectors_bhb_bpiall_start;
                vec_end = __vectors_bhb_bpiall_end;
                break;

        default:
                pr_err("CPU%u: unknown Spectre BHB state %d\n",
                       smp_processor_id(), method);
                return SPECTRE_VULNERABLE;
        }

        copy_from_lma(vectors_page, vec_start, vec_end);
        flush_vectors(vectors_page, 0, vec_end - vec_start);

        return SPECTRE_MITIGATED;
}
#endif

void __init early_trap_init(void *vectors_base)
{
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        copy_from_lma(vectors_base, __vectors_start, __vectors_end);
        copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);

        kuser_init(vectors_base);

        flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
}
#else /* ifndef CONFIG_CPU_V7M */
void __init early_trap_init(void *vectors_base)
{
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
}
#endif