Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}
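
/*
 * Note: the pair above re-enables interrupts inside a trap handler only
 * if they were enabled at the trapped instruction (EFLAGS.IF in the
 * saved frame), so a fault taken in an irqs-off region stays irqs-off.
 */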

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
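
/*
 * Background: BUG() compiles to the two-byte opcode 0F 0B (UD2); read
 * as a little-endian u16 that is 0x0b0f, which is what INSN_UD2 expands
 * to, hence the single comparison above.
 */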

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * Posix requires to provide the address of the faulting instruction for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}
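
/*
 * Note: report_bug() distinguishes WARN from BUG via the __bug_table
 * entry for regs->ip.  For a WARN the handler above skips the two-byte
 * UD2 (LEN_UD2) so execution resumes at the next instruction; for a
 * real BUG it returns unhandled and the caller escalates to die().
 */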

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s", message);
}
#endif
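
/*
 * Note: with CONFIG_VMAP_STACK the kernel stack is vmalloc-backed and
 * bracketed by unmapped guard pages, so a runaway stack touches a guard
 * page and faults instead of silently corrupting adjacent memory;
 * exc_double_fault() below relies on this to diagnose the overflow.
 */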

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif
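	/*
	 * Note: the synthesized frame above relies on struct pt_regs ending
	 * in the hardware iret quintet ip/cs/flags/sp/ss, so the
	 * "(struct pt_regs *)TSS.sp0 - 1" arithmetic places gpregs exactly
	 * where a real #GP would have pushed its frame, with orig_ax
	 * standing in for the lost error code.
	 */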

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
				      regs, address);
	}
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
	insn_get_modrm(&insn);
	insn_get_sib(&insn);

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
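
/*
 * Background for the check above, assuming 4-level paging (48-bit VAs):
 * a canonical address has bits 63:47 all equal to bit 47, so user space
 * ends at 0x00007fffffffffff (== __VIRTUAL_MASK) and the kernel half
 * starts at 0xffff800000000000 (== ~__VIRTUAL_MASK); an operand that
 * straddles the hole in between is non-canonical.
 */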

#define GPFSTR "general protection fault"

static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	ip = insn_get_effective_ip(regs);
	if (!ip)
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}
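
/*
 * Note: 0xfa and 0xfb checked above are the one-byte CLI and STI
 * opcodes; under iopl emulation the kernel logs the attempt once and
 * steps regs->ip past the instruction instead of letting the process
 * toggle the interrupt flag.
 */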

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hit upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
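
/*
 * Note: "(struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1"
 * reserves sizeof(struct pt_regs) at the top of the thread stack; the
 * copy above is what makes the subsequent stack switch in entry_64.S
 * safe, since the IST/trampoline frame may be reused afterwards.
 */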
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) #ifdef CONFIG_AMD_MEM_ENCRYPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	unsigned long sp, *stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	struct stack_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	struct pt_regs *regs_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	 * In the SYSCALL entry path the RSP value comes from user-space - don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	 * trust it and switch to the current kernel stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	if (ip_within_syscall_gap(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		sp = this_cpu_read(cpu_current_top_of_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		goto sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	 * From here on the RSP value is trusted. Now check whether entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * happened from a safe stack. Not safe are the entry or unknown stacks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * use the fall-back stack instead in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	sp    = regs->sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	stack = (unsigned long *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	    info.type > STACK_TYPE_EXCEPTION_LAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		sp = __this_cpu_ist_top_va(VC2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) sync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	 * Found a safe stack - switch to it as if the entry didn't happen via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	 * IST stack. The code below only copies pt_regs, the real switch happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	 * in assembly code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	regs_ret = (struct pt_regs *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	*regs_ret = *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	return regs_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) }
#endif

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/*
	 * Copy the IRET target (the five-word hardware frame: ip, cs,
	 * flags, sp, ss) to the temporary storage.
	 */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
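
/*
 * Resulting layout on the entry stack (a sketch for exposition; offsets
 * follow struct bad_iret_stack above):
 *
 *	sp0 - sizeof(*new_stack):  new_stack->error_entry_ret
 *	                           new_stack->regs - GPRs copied from the
 *	                           failed frame, with ip/cs/flags/sp/ss now
 *	                           describing the IRET *target*
 */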
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
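
/*
 * Note on the comparison above: it relies on unsigned wrap-around to do a
 * two-sided range check with a single compare. With illustrative numbers,
 * begin = 0x1000 and end = 0x1040: for ip = 0x1008, ip - begin == 0x8,
 * which is < 0x40 (inside); for ip = 0x0ff8, ip - begin wraps around to a
 * huge value and the test fails (outside), with no separate lower-bound
 * check needed.
 */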

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}
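
/*
 * Worked example of the polarity flip (assuming DR6_RESERVED == 0xffff0ff0,
 * its value in this tree): a #DB with only breakpoint 0 pending reads DR6
 * as 0xffff0ff1; XOR-ing with DR6_RESERVED yields 0x00000001, so from here
 * on "bit set" uniformly means "event happened", regardless of each bit's
 * hardware polarity.
 */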

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in
 * many more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code).
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}
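
/*
 * A minimal sketch of a DIE_DEBUG consumer (hypothetical out-of-tree code,
 * shown only to illustrate the contract described above):
 *
 *	static int my_db_notify(struct notifier_block *nb, unsigned long cmd,
 *				void *data)
 *	{
 *		struct die_args *args = data;
 *		unsigned long *dr6 = (unsigned long *)args->err;
 *
 *		if (cmd != DIE_DEBUG)
 *			return NOTIFY_DONE;
 *
 *		*dr6 &= ~DR_TRAP0;	- consume breakpoint 0
 *		return NOTIFY_STOP;	- claim the event entirely
 *	}
 *
 * registered with register_die_notifier(); args->err carries the
 * (long)dr6 pointer cookie passed by notify_debug().
 */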

static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X; cpu_entry_area, which includes
	 * the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception."  PTRACE_BLOCKSTEP requested
		 * BTF for userspace, but we just took a kernel #DB, so
		 * re-set it.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	if (kprobe_debug_handler(regs))
		goto out;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If DR6 gives us no clue about the origin of this trap, it is very
	 * likely the result of an icebp/int01 trap. Userspace wants a
	 * SIGTRAP for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
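
/*
 * Example of the resulting ptrace view (illustrative): after a
 * PTRACE_SINGLESTEP stop, virtual_dr6 holds DR_STEP, so
 * ptrace(PTRACE_PEEKUSER, pid, offsetof(struct user, u_debugreg[6]), 0)
 * reports DR_STEP (bit 14) even though the hardware DR6 was already
 * cleared by debug_read_clear_dr6() on entry.
 */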

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit to try to get the correct
 * behaviour even in the presence of asynchronous IRQ13 delivery of FPU
 * errors.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
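
/*
 * Illustrative outcome (see fpu__exception_code() for the full mapping): a
 * userspace x87 divide-by-zero with the ZE exception unmasked arrives here
 * as X86_TRAP_MF and ends in force_sig_fault(SIGFPE, FPE_FLTDIV, addr);
 * with ZE masked, the hardware never raises #MF in the first place.
 */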

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32-bit, but the handler does
	 * no harm and who knows which other CPUs suffer from this.
	 */
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	idt_setup_traps();

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();
}