// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif

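/* Human-readable name for a fatal signal, used by the unhandled-signal log. */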
static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
	    !current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

extern void panic_flush_kmsg_start(void)
{
	/*
	 * This is mostly taken from kernel/panic.c, but tries to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * the Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}

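/*
 * Serialise oops output: take the die lock (tolerating nested oopses on
 * the same CPU) and unblank the console. Returns the saved irq flags,
 * which must be handed back to oops_end().
 */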
static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception() handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

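/* Describe the active MMU translation mode for the oops banner. */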
static char *get_mmu_str(void)
{
	if (early_radix_enabled())
		return " MMU=Radix";
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return " MMU=Hash";
	return "";
}

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024, get_mmu_str(),
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception() handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

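/* Report a user-mode single step as SIGTRAP/TRAP_TRACE at the current NIP. */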
void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}

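/* Rate-limited diagnostic printed when a fatal signal goes unhandled. */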
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

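/*
 * Common exception-to-signal path: dies if the fault happened in kernel
 * mode, otherwise logs the event and returns true so the caller can
 * deliver the signal to the current task.
 */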
static bool exception_common(int signr, struct pt_regs *regs, int code,
			     unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	return true;
}

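/* Deliver a SIGSEGV with SEGV_PKUERR for a memory protection key violation. */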
void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

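/* Deliver an arbitrary fatal signal for an exception taken in user mode. */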
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr);
}

/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
	    nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
	    nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
#endif
}

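/*
 * 0x100 system reset: taken as an NMI; hands off to the platform handler,
 * debugger, or crash dump before falling back to die()/panic.
 */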
void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool saved_hsrrs = false;
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);

	nmi_enter();

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2 * MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI)) {
		/* For the reason explained in die_mce, nmi_exit before die */
		nmi_exit();
		die("Unrecoverable System Reset", regs, SIGABRT);
	}

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR
#define REASON_PREFIXED		0
#define REASON_BOUNDARY		0

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do { } while (0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP
#define REASON_PREFIXED		SRR1_PREFIXED
#define REASON_BOUNDARY		SRR1_BOUNDARY

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif

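/* Prefixed instructions are 8 bytes long, all others are 4. */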
#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)

#if defined(CONFIG_E500)
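/* Decode MCSR machine-check causes on e500mc-family cores and judge recoverability. */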
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

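/*
 * Decode MCSR machine-check causes on e500v1/v2 cores. Returns 1 when a
 * platform error handler recovered the event, 0 otherwise.
 */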
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

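/* No core-specific cause decoding here; report the event as unhandled. */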
int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
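/* Decode MCSR machine-check causes on e200 cores; always reported unhandled. */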
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		pr_cont("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		pr_cont("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		pr_cont("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		pr_cont("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
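/* Classic PPC32 cores: decode the machine-check cause bits captured in SRR1. */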
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) int machine_check_generic(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) unsigned long reason = regs->msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) printk("Machine check in kernel mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) printk("Caused by (from SRR1=%lx): ", reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) switch (reason & 0x601F0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) case 0x80000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) pr_cont("Machine check signal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) case 0x40000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) case 0x140000: /* 7450 MSS error and TEA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) pr_cont("Transfer error ack signal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) case 0x20000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) pr_cont("Data parity error signal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) case 0x10000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) pr_cont("Address parity error signal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) case 0x20000000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) pr_cont("L1 Data Cache error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) case 0x40000000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) pr_cont("L1 Instruction Cache error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) case 0x00100000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) pr_cont("L2 data cache parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) pr_cont("Unknown values in msr\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) #endif /* everything else */
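
/*
 * Worked example (illustrative): on a 7450-class core, an MSS error reported
 * together with TEA leaves SRR1 & 0x601F0000 == 0x140000, so it shares the
 * "Transfer error ack signal" case with a plain TEA (0x40000); a machine
 * check signal alone would select the 0x80000 case instead.
 */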

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	/*
	 * BOOK3S_64 does not call this handler as a non-maskable interrupt
	 * (it uses its own early real-mode handler to handle the MCE proper
	 * and then raises irq_work to call this handler when interrupts are
	 * enabled).
	 *
	 * This is silly. BOOK3S_64 should just call a different function
	 * rather than expecting semantics to magically change. Something
	 * like 'non_nmi_machine_check_exception()', perhaps?
	 */
	const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);

	if (nmi) nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * See if there are any machine-dependent handlers to call. In theory,
	 * we would want to call the CPU-specific one first and call the
	 * ppc_md one only if the CPU one returns a positive number. However,
	 * there is existing code that assumes the board gets first chance,
	 * so let's keep it that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	if (nmi) nmi_exit();

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		die("Unrecoverable Machine check", regs, SIGBUS);

	return;

bail:
	if (nmi) nmi_exit();
}
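
/*
 * Illustrative sketch (hypothetical, not part of this file): a platform can
 * claim the first-chance handling consulted above by filling in the ppc_md
 * hook from its setup code. Returning a positive value marks the event as
 * recovered and skips the die() path:
 *
 *	static int myboard_machine_check(struct pt_regs *regs)
 *	{
 *		if (myboard_clear_bus_error())	// hypothetical helper
 *			return 1;		// recovered, resume
 *		return 0;			// fall through to generic code
 *	}
 *
 *	ppc_md.machine_check_exception = myboard_machine_check;
 */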

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16] __aligned(16), *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}
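
	/*
	 * Note on the check above: the mask 0xfc00073e keeps the primary
	 * opcode and the extended opcode while ignoring the three register
	 * fields, bits 6-7 (the "sel" field extracted below, which tells the
	 * four loads apart) and bit 0 (the TX bit selecting VSRs 32-63).
	 * E.g. lxvd2x: 0x7c000698 & 0xfc00073e == 0x7c000618.
	 */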

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endianness? (This is rare
	 * but not impossible.)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
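
	/*
	 * MSR_KERNEL & MSR_LE is the kernel's own endianness, so "swap" is
	 * true exactly when the interrupted user context runs opposite-endian
	 * to the kernel (e.g. a BE process on an LE kernel).
	 */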

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long)addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX				sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used	sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1))	/* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */
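
/*
 * Worked example (illustrative): with the sixteen source bytes 00 01 ... 0f
 * in vbuf, the LE-kernel lxvd2x case above copies ((u64 *)vbuf)[1] into
 * ((u64 *)vdst)[0] and vice versa, leaving vdst as bytes 08..0f followed by
 * 00..07: the doublewords are swapped while the bytes inside each keep the
 * order the LE kernel uses for the register image in the task struct.
 */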

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
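
/*
 * Worked example (illustrative): an enabled divide-by-zero sets both
 * FPSCR_ZE and FPSCR_ZX, so __parse_fpscr() returns FPE_FLTDIV. When several
 * enabled exceptions are pending at once, the if/else chain above gives
 * invalid operation the highest priority and inexact the lowest; anything
 * else falls back to FPE_FLTUNK.
 */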

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR
 * instruction. Return non-zero if we can't emulate, or -EFAULT
 * if the associated memory access caused an access fault.
 * Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0) {
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));
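
		/*
		 * Bytes fill each register from the most significant end:
		 * pos 0 -> shift 24, pos 1 -> shift 16, pos 2 -> shift 8,
		 * pos 3 -> shift 0.
		 */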

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg, zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
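
/*
 * Worked example (illustrative): "lswi r5,r4,6" reaches the PPC_INST_LSWI
 * case with rT=5, rA=4, NB_RB=6, so num_bytes = 6 and EA = r4. The first
 * four bytes fill r5 from the most significant byte down; the remaining two
 * land in the top half of r6, which is zeroed on first touch, so its low
 * 16 bits end up clear.
 */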

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
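
/*
 * The three masking steps above are the classic SWAR popcount: partial
 * counts are accumulated in 2-bit, then 4-bit, then 8-bit lanes, so each
 * byte of gpr[ra] ends up holding the population count of the matching byte
 * of gpr[rs] -- per-byte counts with no cross-byte sum, which is what
 * popcntb architects. E.g. a source byte of 0xff goes
 * 0xff -> 0xaa -> 0x44 -> 0x08.
 */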

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
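
/*
 * Worked example (illustrative): "isel r9,r3,r4,2" arrives with rT=9, rA=3,
 * rB=4, BC=2. BC=2 is CR0[EQ], read above as (ccr >> 29) & 1; if the bit is
 * set, r9 = r3 (or 0 had rA been r0, matching the architected rA|0 operand),
 * otherwise r9 = r4.
 */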

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/*
	 * If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;
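
		/*
		 * (instword >> 21) & 0x1c above is the 3-bit BF (target CR
		 * field) number already multiplied by 4, i.e.
		 * ((instword >> 23) & 0x7) * 4, so msk covers the four CR
		 * bits of field BF.
		 */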

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/*
	 * We can now get here via a FP Unavailable exception if the core
	 * has no FPU; in that case the reason flags will be 0.
	 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non-transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist. We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel, skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/*
	 * (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * This occurs when running in hypervisor mode on POWER6 or later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * and an illegal instruction is encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) void emulation_assist_interrupt(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) regs->msr |= REASON_ILLEGAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) program_check_exception(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) NOKPROBE_SYMBOL(emulation_assist_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) void alignment_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) int sig, code, fixed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned long reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* We restore the interrupt state now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (!arch_irq_disabled_regs(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) reason = get_reason(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (reason & REASON_BOUNDARY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) sig = SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) code = BUS_ADRALN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* we don't implement logging of alignment exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) fixed = fix_alignment(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (fixed == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /* skip over emulated instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) regs->nip += inst_length(reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) emulate_single_step(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /* Operand address was bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (fixed == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) sig = SIGSEGV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) code = SEGV_ACCERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) sig = SIGBUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) code = BUS_ADRALN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) _exception(sig, regs, code, regs->dar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) bad_page_fault(regs, regs->dar, sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
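
/*
 * Illustrative userspace counterpart (not kernel code): a task can opt out
 * of the in-kernel fixup above and receive SIGBUS instead, which is what
 * the align_ctl check honours:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 */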
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) void StackOverflow(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) current->comm, task_pid_nr(current), regs->gpr[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) debugger(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) panic("kernel stack overflow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) void stack_overflow_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) die("Kernel stack overflow", regs, SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) void kernel_fp_unavailable_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
	pr_emerg("Unrecoverable FP Unavailable Exception %lx at %lx\n",
		 regs->trap, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) void altivec_unavailable_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) /* A user program has executed an altivec instruction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) but this kernel doesn't support altivec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
	pr_emerg("Unrecoverable VMX/Altivec Unavailable Exception %lx at %lx\n",
		 regs->trap, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) void vsx_unavailable_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) but this kernel doesn't support vsx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
	pr_emerg("Unrecoverable VSX Unavailable Exception %lx at %lx\n",
		 regs->trap, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) static void tm_unavailable(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) current->thread.load_tm++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) regs->msr |= MSR_TM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) tm_enable();
		tm_restore_sprs(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) #endif
	pr_emerg("Unrecoverable TM Unavailable Exception %lx at %lx\n",
		 regs->trap, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) void facility_unavailable_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
	static const char * const facility_strings[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) [FSCR_FP_LG] = "FPU",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) [FSCR_VECVSX_LG] = "VMX/VSX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) [FSCR_DSCR_LG] = "DSCR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) [FSCR_PM_LG] = "PMU SPRs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) [FSCR_BHRB_LG] = "BHRB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) [FSCR_TM_LG] = "TM",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) [FSCR_EBB_LG] = "EBB",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) [FSCR_TAR_LG] = "TAR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) [FSCR_MSGP_LG] = "MSGP",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) [FSCR_SCV_LG] = "SCV",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) [FSCR_PREFIX_LG] = "PREFIX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) };
	const char *facility = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) u32 instword, rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) bool hv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) hv = (TRAP(regs) == 0xf80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (hv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) value = mfspr(SPRN_HFSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) value = mfspr(SPRN_FSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) status = value >> 56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if ((hv || status >= 2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) (status < ARRAY_SIZE(facility_strings)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) facility_strings[status])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) facility = facility_strings[status];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /* We should not have taken this interrupt in kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) facility, status, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) die("Unexpected facility unavailable exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* We restore the interrupt state now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (!arch_irq_disabled_regs(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (status == FSCR_DSCR_LG) {
		/*
		 * The user is accessing the DSCR through the problem-state-only
		 * SPR number (0x03), either with an mfspr or an mtspr
		 * instruction. If it is a write attempt through mtspr, we set
		 * the inherit bit, which also lets the user read or write the
		 * register directly in future by setting the FSCR DSCR bit.
		 * If it is a read attempt through mfspr, we just emulate the
		 * instruction instead. This path keeps emulating mfspr until
		 * the user has attempted at least one mtspr, which preserves
		 * the behaviour seen when the DSCR is accessed through the
		 * privileged SPR number (0x11), which is emulated via the
		 * illegal-instruction exception. We always leave HFSCR DSCR
		 * set.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (get_user(instword, (u32 __user *)(regs->nip))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) pr_err("Failed to fetch the user instruction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Write into DSCR (mtspr 0x03, RS) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) == PPC_INST_MTSPR_DSCR_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) rd = (instword >> 21) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) current->thread.dscr = regs->gpr[rd];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) current->thread.dscr_inherit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) current->thread.fscr |= FSCR_DSCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) mtspr(SPRN_FSCR, current->thread.fscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /* Read from DSCR (mfspr RT, 0x03) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) == PPC_INST_MFSPR_DSCR_USER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (emulate_instruction(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) pr_err("DSCR based mfspr emulation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) regs->nip += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) emulate_single_step(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
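
	/*
	 * Illustrative encoding sketch for the DSCR path above (worked out
	 * by hand, not used by the code): "mtspr 0x03, r5" assembles to
	 * 0x7CA303A6, and the handler recovers the source register with
	 *
	 *	rd = (0x7CA303A6 >> 21) & 0x1f;
	 *
	 * which yields 5, so regs->gpr[5] holds the value the task tried
	 * to write into the DSCR.
	 */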
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (status == FSCR_TM_LG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * told us not to do TM, or the kernel is not built with TM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * If both of those things are true, then userspace can spam the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * console by triggering the printk() below just by continually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * doing tbegin (or any TM instruction). So in that case just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * send the process a SIGILL immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (!cpu_has_feature(CPU_FTR_TM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) tm_unavailable(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
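
	/*
	 * Illustrative userspace trigger (not kernel code; assumes a
	 * toolchain that accepts the HTM mnemonics):
	 *
	 *	for (;;)
	 *		asm volatile("tbegin." : : : "cr0", "memory");
	 *
	 * On TM-aware hardware where CPU_FTR_TM is clear, every iteration
	 * traps with status FSCR_TM_LG, so without the early SIGILL above
	 * each trap would reach the pr_err_ratelimited() below.
	 */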
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
#endif /* CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) void fp_unavailable_tm(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /* Note: This does not handle any kind of FP laziness. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) regs->nip, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* We can only have got here if the task started using FP after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * beginning the transaction. So, the transactional regs are just a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * copy of the checkpointed ones. But, we still need to recheckpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * as we're enabling FP for the process; it will return, abort the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * transaction, and probably retry but now with FP enabled. So the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * checkpointed FP registers need to be loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) tm_reclaim_current(TM_CAUSE_FAC_UNAV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /*
	 * Reclaim initially saved the bogus (lazy) FPRs out to ckfp_state,
	 * which was then overwritten with thr->fp_state by tm_reclaim_thread().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * At this point, ck{fp,vr}_state contains the exact values we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * recheckpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /* Enable FP for the task: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) current->thread.load_fp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
	/*
	 * Recheckpoint the checkpointed ckpt_regs and ck{fp,vr}_state
	 * registers.
	 */
	tm_recheckpoint(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) void altivec_unavailable_tm(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /* See the comments in fp_unavailable_tm(). This function operates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * the same way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) tm_reclaim_current(TM_CAUSE_FAC_UNAV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) current->thread.used_vr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) void vsx_unavailable_tm(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /* See the comments in fp_unavailable_tm(). This works similarly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * though we're loading both FP and VEC registers in here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * regs. Either way, set MSR_VSX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) current->thread.used_vsr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) /* This reclaims FP and/or VR regs if they're already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) tm_reclaim_current(TM_CAUSE_FAC_UNAV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) current->thread.load_vec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) current->thread.load_fp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
	tm_recheckpoint(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static void performance_monitor_exception_nmi(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) nmi_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) __this_cpu_inc(irq_stat.pmu_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) perf_irq(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) nmi_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static void performance_monitor_exception_async(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) irq_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) __this_cpu_inc(irq_stat.pmu_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) perf_irq(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) irq_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) void performance_monitor_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * On 64-bit, if perf interrupts hit in a local_irq_disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * (soft-masked) region, we consider them as NMIs. This is required to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * prevent hash faults on user addresses when reading callchains (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * looks better from an irq tracing perspective).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) performance_monitor_exception_nmi(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) performance_monitor_exception_async(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. -- Torez
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) #endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) } else if (debug_status & DBSR_IAC1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) } else if (debug_status & DBSR_IAC2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) } else if (debug_status & DBSR_IAC3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) } else if (debug_status & DBSR_IAC4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) changed |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * At the point this routine was called, the MSR(DE) was turned off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * Check all other debug flags and see if that bit needs to be turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * back on or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) current->thread.debug.dbcr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) regs->msr |= MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* Make sure the IDM flag is off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) current->thread.debug.dbcr0 &= ~DBCR0_IDM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (changed & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
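
/*
 * Illustrative arming sequence (simplified debugger side): the DAC events
 * handled above are typically programmed through ptrace, e.g.
 *
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, watch_addr);
 *
 * after which a load or store to watch_addr in the tracee raises
 * DBSR_DAC1R/DBSR_DAC1W and lands in handle_debug() above.
 */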
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) void DebugException(struct pt_regs *regs, unsigned long debug_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) current->thread.debug.dbsr = debug_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (debug_status & DBSR_BT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) regs->msr &= ~MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* Disable BT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /* Clear the BT event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) mtspr(SPRN_DBSR, DBSR_BT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) /* Do the single step trick only when coming from userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) current->thread.debug.dbcr0 &= ~DBCR0_BT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) regs->msr |= MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (kprobe_post_handler(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (notify_die(DIE_SSTEP, "block_step", regs, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 5, SIGTRAP) == NOTIFY_STOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (debugger_sstep(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) } else if (debug_status & DBSR_IC) { /* Instruction complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) regs->msr &= ~MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* Disable instruction completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /* Clear the instruction completion event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) mtspr(SPRN_DBSR, DBSR_IC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (kprobe_post_handler(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (notify_die(DIE_SSTEP, "single_step", regs, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 5, SIGTRAP) == NOTIFY_STOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (debugger_sstep(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) current->thread.debug.dbcr0 &= ~DBCR0_IC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) current->thread.debug.dbcr1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) regs->msr |= MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* Make sure the IDM bit is off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) current->thread.debug.dbcr0 &= ~DBCR0_IDM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) handle_debug(regs, debug_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) NOKPROBE_SYMBOL(DebugException);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) void altivec_assist_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (!user_mode(regs)) {
		pr_emerg("VMX/Altivec assist exception in kernel mode at %lx\n",
			 regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) die("Kernel VMX/Altivec assist exception", regs, SIGILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) flush_altivec_to_thread(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) PPC_WARN_EMULATED(altivec, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) err = emulate_altivec(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) regs->nip += 4; /* skip emulated instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) emulate_single_step(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (err == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /* got an error reading the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* didn't recognize the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /* XXX quick hack for now: set the non-Java bit in the VSCR */
		pr_err_ratelimited("Unrecognized altivec instruction in %s at %lx\n",
				   current->comm, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) current->thread.vr_state.vscr.u[3] |= 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) #ifdef CONFIG_FSL_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) void CacheLockingException(struct pt_regs *regs, unsigned long address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) unsigned long error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
	/* We treat cache locking instructions from the user
	 * as privileged ops; in the future we could try to do
	 * something smarter.
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
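
/*
 * Illustrative trigger: a user-mode cache-locking touch such as
 *
 *	dcbtls 0, 0, r3
 *
 * sets ESR[DLK] on e500 cores (icbtls sets ESR[ILK]) and arrives here,
 * where it is treated as a privileged-operation SIGILL.
 */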
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) #endif /* CONFIG_FSL_BOOKE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) void SPEFloatingPointException(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) extern int do_spe_mathemu(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) unsigned long spefscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) int fpexc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int code = FPE_FLTUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* We restore the interrupt state now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if (!arch_irq_disabled_regs(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) flush_spe_to_thread(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) spefscr = current->thread.spefscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) fpexc_mode = current->thread.fpexc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) err = do_spe_mathemu(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) regs->nip += 4; /* skip emulated instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) emulate_single_step(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (err == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /* got an error reading the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) } else if (err == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* didn't recognize the instruction */
		pr_err("unrecognized spe instruction in %s at %lx\n",
		       current->comm, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) _exception(SIGFPE, regs, code, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) void SPEFloatingPointRoundException(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) extern int speround_handler(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* We restore the interrupt state now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (!arch_irq_disabled_regs(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (regs->msr & MSR_SPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) giveup_spe(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
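	/*
	 * The round exception leaves NIP pointing past the offending
	 * instruction; back up so speround_handler() can refetch it.
	 */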
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) regs->nip -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) err = speround_handler(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) regs->nip += 4; /* skip emulated instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) emulate_single_step(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (err == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /* got an error reading the instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) } else if (err == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /* didn't recognize the instruction */
		pr_err("unrecognized spe instruction in %s at %lx\n",
		       current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
#endif /* CONFIG_SPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * We enter here if we get an unrecoverable exception, that is, one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * that happened at a point where the RI (recoverable interrupt) bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) * in the MSR is 0. This indicates that SRR0/1 are live, and that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * we therefore lost state by taking this exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) void unrecoverable_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) regs->trap, regs->nip, regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) die("Unrecoverable exception", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) NOKPROBE_SYMBOL(unrecoverable_exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception: it just masks further
 * watchdog interrupts and otherwise does nothing.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) /* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
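
/*
 * A platform can supply a strong definition that overrides the weak
 * handler above; a hypothetical sketch that pets the watchdog instead:
 *
 *	void WatchdogHandler(struct pt_regs *regs)
 *	{
 *		mtspr(SPRN_TSR, TSR_ENW | TSR_WIS);
 *	}
 */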
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) void WatchdogException(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
	pr_emerg("PowerPC Book-E Watchdog Exception\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) WatchdogHandler(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
#endif /* CONFIG_BOOKE_WDT || CONFIG_40x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * We enter here if we discover during exception entry that we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * running in supervisor mode with a userspace value in the stack pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) void kernel_bad_stack(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) {
	pr_emerg("Bad kernel stack pointer %lx at %lx\n",
		 regs->gpr[1], regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) die("Bad kernel stack pointer", regs, SIGABRT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) NOKPROBE_SYMBOL(kernel_bad_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) void __init trap_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) #ifdef CONFIG_PPC_EMULATED_STATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) #define WARN_EMULATED_SETUP(type) .type = { .name = #type }
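
/*
 * For example, WARN_EMULATED_SETUP(isel) expands to
 *
 *	.isel = { .name = "isel" }
 *
 * so each counter's debugfs file below is named after its struct member.
 */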
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct ppc_emulated ppc_emulated = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) WARN_EMULATED_SETUP(altivec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) WARN_EMULATED_SETUP(dcba),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) WARN_EMULATED_SETUP(dcbz),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) WARN_EMULATED_SETUP(fp_pair),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) WARN_EMULATED_SETUP(isel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) WARN_EMULATED_SETUP(mcrxr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) WARN_EMULATED_SETUP(mfpvr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) WARN_EMULATED_SETUP(multiple),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) WARN_EMULATED_SETUP(popcntb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) WARN_EMULATED_SETUP(spe),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) WARN_EMULATED_SETUP(string),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) WARN_EMULATED_SETUP(sync),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) WARN_EMULATED_SETUP(unaligned),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) #ifdef CONFIG_MATH_EMULATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) WARN_EMULATED_SETUP(math),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) WARN_EMULATED_SETUP(vsx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) WARN_EMULATED_SETUP(mfdscr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) WARN_EMULATED_SETUP(mtdscr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) WARN_EMULATED_SETUP(lq_stq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) WARN_EMULATED_SETUP(lxvw4x),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) WARN_EMULATED_SETUP(lxvh8x),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) WARN_EMULATED_SETUP(lxvd2x),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) WARN_EMULATED_SETUP(lxvb16x),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) u32 ppc_warn_emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) void ppc_warn_emulated_print(const char *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static int __init ppc_warn_emulated_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct dentry *dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) dir = debugfs_create_dir("emulated_instructions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) powerpc_debugfs_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) debugfs_create_u32(entries[i].name, 0644, dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) (u32 *)&entries[i].val.counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) device_initcall(ppc_warn_emulated_init);
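
/*
 * Illustrative usage from a shell (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/powerpc/emulated_instructions/do_warn
 *	cat /sys/kernel/debug/powerpc/emulated_instructions/dcbz
 *
 * The first enables the ratelimited warning above; the second reads how
 * many dcbz instructions have been emulated so far.
 */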
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) #endif /* CONFIG_PPC_EMULATED_STATS */