// SPDX-License-Identifier: GPL-2.0-only
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <xen/xen.h>

#include <asm/fpu/internal.h>
#include <asm/sev-es.h>
#include <asm/traps.h>
#include <asm/kdebug.h>

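/*
 * Each __ex_table entry stores three 32-bit, self-relative offsets: the
 * address of the instruction that may fault, the fixup address to resume
 * at, and the handler to run.  A handler returns true once it has resolved
 * the fault (normally after pointing regs->ip at the fixup), or false to
 * let the normal fault handling continue.
 */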
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			    struct pt_regs *, int, unsigned long,
			    unsigned long);

static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}
static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}

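/*
 * Entries are not written by hand here; they are emitted next to the
 * faulting instruction by the _ASM_EXTABLE*() macros from <asm/asm.h>.
 * As a rough sketch (see that header for the authoritative definition),
 * _ASM_EXTABLE_HANDLE(from, to, handler) expands to something like:
 *
 *	.pushsection "__ex_table", "a"
 *	.balign 4
 *	.long (from) - .
 *	.long (to) - .
 *	.long (handler) - .
 *	.popsection
 *
 * with _ASM_EXTABLE(), _ASM_EXTABLE_UA(), _ASM_EXTABLE_CPY() and
 * _ASM_EXTABLE_FAULT() selecting ex_handler_default(), ex_handler_uaccess(),
 * ex_handler_copy() and ex_handler_fault() respectively.
 */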
__visible bool ex_handler_default(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_default);

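/*
 * Like ex_handler_default(), but also hands the trap number back to the
 * fixup code in regs->ax (%eax/%rax) so it can tell, for example, a #PF
 * apart from a #GP.
 */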
__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
				struct pt_regs *regs, int trapnr,
				unsigned long error_code,
				unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);

/*
 * Handler for when we fail to restore a task's FPU state. We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid. However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU. Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				    struct pt_regs *regs, int trapnr,
				    unsigned long error_code,
				    unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);

	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
		  (void *)instruction_pointer(regs));

	__copy_kernel_to_fpregs(&init_fpstate, -1);
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);

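/*
 * ex_handler_uaccess() backs the user-access fixups registered with
 * _ASM_EXTABLE_UA().  As an illustrative sketch only (the real helpers
 * live in <asm/uaccess.h> and differ in detail), a single user load
 * hooks itself up roughly like this:
 *
 *	asm volatile("1:	movl (%[uptr]), %[val]\n"
 *		     "2:\n"
 *		     ".section .fixup, \"ax\"\n"
 *		     "3:	movl %[efault], %[err]\n"
 *		     "	jmp 2b\n"
 *		     ".previous\n"
 *		     _ASM_EXTABLE_UA(1b, 3b)
 *		     : [err] "+r" (err), [val] "=r" (val)
 *		     : [uptr] "r" (uptr), [efault] "i" (-EFAULT));
 *
 * If the load at 1: faults, this handler rewrites regs->ip to the fixup
 * at 3:, which reports -EFAULT and resumes at 2:.
 */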
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_uaccess);

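/*
 * Like ex_handler_uaccess(), but additionally reports the trap number back
 * to the fixup code in regs->ax.  This is what _ASM_EXTABLE_CPY() entries
 * in the user copy routines use so that, e.g., machine-check aborts can be
 * told apart from ordinary page faults.
 */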
__visible bool ex_handler_copy(const struct exception_table_entry *fixup,
			       struct pt_regs *regs, int trapnr,
			       unsigned long error_code,
			       unsigned long fault_addr)
{
	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL(ex_handler_copy);

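/*
 * The two handlers below back the non-checking rdmsr()/wrmsr() accessors.
 * Rather than propagating the #GP raised by an unknown MSR, warn once with
 * a stack trace and pretend the access succeeded.
 */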
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the read succeeded and returned 0. */
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = 0;
	regs->dx = 0;
	return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);

__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx,
			 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the write succeeded. */
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);

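/*
 * Used when loading a new selector into %fs faults (see __loadsegment_fs()
 * in <asm/segment.h>): fall back to a NULL selector.  On CPUs with
 * X86_BUG_NULL_SEG, loading a NULL selector does not clear the cached
 * segment base, so go through __USER_DS first to flush the stale base.
 */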
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);

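/*
 * Classify the fixup (if any) registered for @ip without invoking it, so
 * the fault handling code can tell a user-access fixup apart from a plain
 * fault fixup.
 */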
enum handler_type ex_get_fault_handler_type(unsigned long ip)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

	e = search_exception_tables(ip);
	if (!e)
		return EX_HANDLER_NONE;
	handler = ex_fixup_handler(e);
	if (handler == ex_handler_fault)
		return EX_HANDLER_FAULT;
	else if (handler == ex_handler_uaccess || handler == ex_handler_copy)
		return EX_HANDLER_UACCESS;
	else
		return EX_HANDLER_OTHER;
}

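/*
 * The handler is reconstructed from a self-relative offset rather than
 * taken from a regular C function pointer, so the indirect call below is
 * exempted from Clang CFI checking (hence __nocfi).
 */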
__nocfi
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
		    unsigned long fault_addr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr, error_code, fault_addr);
}

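/* Incremented by the early IDT entry code in head_{32,64}.S. */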
extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined. I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 * Xen pv domains are not using the default __KERNEL_CS.
	 */
	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded. This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here. Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
		return;

	if (trapnr == X86_TRAP_UD) {
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
			/* Skip the ud2. */
			regs->ip += LEN_UD2;
			return;
		}

		/*
		 * If this was a BUG and report_bug returns or if this
		 * was just a normal #UD, we want to continue onward and
		 * crash.
		 */
	}

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());

	show_regs(regs);

halt_loop:
	while (true)
		halt();
}