^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* arch/sparc64/kernel/traps.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * I like traps on v9, :))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/extable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/kallsyms.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/kdebug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/context_tracking.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/oplib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/fpumacro.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/lsu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/dcu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/estate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/chafsr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/sfafsr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <asm/psrcompat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <asm/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <asm/head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <asm/memctrl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "entry.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include "kernel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "kstack.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* When an irrecoverable trap occurs at tl > 0, the trap entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * code logs the trap state registers at every level in the trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * is as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct tl1_traplog {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) unsigned long tstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) unsigned long tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) unsigned long tnpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) unsigned long tt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) } trapstack[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) unsigned long tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) static void dump_tl1_traplog(struct tl1_traplog *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) int i, limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) "dumping trap stack.\n", p->tl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) limit = (tlb_type == hypervisor) ? 2 : 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) for (i = 0; i < limit; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) printk(KERN_EMERG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) "TNPC[%016lx] TT[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) p->trapstack[i].tstate, p->trapstack[i].tpc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) p->trapstack[i].tnpc, p->trapstack[i].tt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) void bad_trap(struct pt_regs *regs, long lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) char buffer[36];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (notify_die(DIE_TRAP, "bad trap", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) 0, lvl, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (lvl < 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) die_if_kernel(buffer, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) lvl -= 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) sprintf(buffer, "Kernel bad sw trap %lx", lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) die_if_kernel(buffer, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) force_sig_fault(SIGILL, ILL_ILLTRP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) (void __user *)regs->tpc, lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) void bad_trap_tl1(struct pt_regs *regs, long lvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) char buffer[36];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 0, lvl, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) sprintf(buffer, "Bad trap %lx at tl>0", lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) die_if_kernel(buffer, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #ifdef CONFIG_DEBUG_BUGVERBOSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) void do_BUG(const char *file, int line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) bust_spinlocks(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) printk("kernel BUG at %s:%d!\n", file, line);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) EXPORT_SYMBOL(do_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static DEFINE_SPINLOCK(dimm_handler_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static dimm_printer_t dimm_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
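^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* Translate an ECC syndrome code and physical address into a memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * module name. A platform memory controller driver may provide the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * translation via register_dimm_printer(); on spitfire we can also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * fall back to prom_getunumber().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) */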
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) spin_lock_irqsave(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (dimm_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) ret = dimm_handler(synd_code, paddr, buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) } else if (tlb_type == spitfire) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) spin_unlock_irqrestore(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int register_dimm_printer(dimm_printer_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) spin_lock_irqsave(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) if (!dimm_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) dimm_handler = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) spin_unlock_irqrestore(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) EXPORT_SYMBOL_GPL(register_dimm_printer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) void unregister_dimm_printer(dimm_printer_t func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) spin_lock_irqsave(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (dimm_handler == func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) dimm_handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) spin_unlock_irqrestore(&dimm_handler_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) EXPORT_SYMBOL_GPL(unregister_dimm_printer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (notify_die(DIE_TRAP, "instruction access exception", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) printk("spitfire_insn_access_exception: SFSR[%016lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) "SFAR[%016lx], going.\n", sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) die_if_kernel("Iax", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) force_sig_fault(SIGSEGV, SEGV_MAPERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) (void __user *)regs->tpc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) spitfire_insn_access_exception(regs, sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) unsigned short type = (type_ctx >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) unsigned short ctx = (type_ctx & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (notify_die(DIE_TRAP, "instruction access exception", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) printk("sun4v_insn_access_exception: ADDR[%016lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) "CTX[%04x] TYPE[%04x], going.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) addr, ctx, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) die_if_kernel("Iax", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *) addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) sun4v_insn_access_exception(regs, addr, type_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
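^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /* Check whether the faulting access at regs->tpc was a load through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * no-fault ASI. If so, emulate it via handle_ldf_stq()/handle_ld_nf()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * and return true so the caller can skip delivering a signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */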
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) bool is_no_fault_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) unsigned char asi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) u32 insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Must do a little instruction decoding here in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * decide on a course of action. The bits of interest are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * insn[31:30] = op, where 3 indicates the load/store group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * insn[24:19] = op3, which identifies individual opcodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * insn[13] indicates an immediate offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * op3[4]=1 identifies alternate space instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * op3[5:4]=3 identifies floating point instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * op3[2]=1 identifies stores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * See "Opcode Maps" in the appendix of any Sparc V9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * architecture spec for full details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) if (insn & 0x2000) /* immediate offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) asi = (regs->tstate >> 24); /* saved %asi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) asi = (insn >> 5); /* immediate asi */
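^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) /* The 0xf6 mask folds the secondary and little-endian no-fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) * ASIs (0x83, 0x8a, 0x8b) onto ASI_PNF (0x82), so any of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * four no-fault ASIs matches here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) */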
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if ((asi & 0xf6) == ASI_PNF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (insn & 0x200000) /* op3[2], stores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) handle_ldf_stq(insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) handle_ld_nf(insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (notify_die(DIE_TRAP, "data access exception", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 0, 0x30, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /* Test if this comes from uaccess places. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) /* Ouch, somebody is trying VM hole tricks on us... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) #ifdef DEBUG_EXCEPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) regs->tpc, entry->fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /* Shit... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) printk("spitfire_data_access_exception: SFSR[%016lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) "SFAR[%016lx], going.\n", sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) die_if_kernel("Dax", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) if (is_no_fault_exception(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)sfar, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 0, 0x30, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) spitfire_data_access_exception(regs, sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) unsigned short type = (type_ctx >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) unsigned short ctx = (type_ctx & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) if (notify_die(DIE_TRAP, "data access exception", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /* Test if this comes from uaccess places. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /* Ouch, somebody is trying VM hole tricks on us... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) #ifdef DEBUG_EXCEPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) regs->tpc, entry->fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) printk("sun4v_data_access_exception: ADDR[%016lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) "CTX[%04x] TYPE[%04x], going.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) addr, ctx, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) die_if_kernel("Dax", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (is_no_fault_exception(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) /* MCD (Memory Corruption Detection) disabled trap (TT=0x19) in HV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * is vectored through data access exception trap with fault type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * set to HV_FAULT_TYPE_MCD_DIS. Check for MCD disabled trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * Accessing an address with invalid ASI for the address, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * example setting an ADI tag on an address with ASI_MCD_PRIMARY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * when TTE.mcd is not set for the VA, is also vectored into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * kernel by HV as data access exception with fault type set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * HV_FAULT_TYPE_INV_ASI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) case HV_FAULT_TYPE_INV_ASI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) force_sig_fault(SIGILL, ILL_ILLADR, (void __user *)addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) case HV_FAULT_TYPE_MCD_DIS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) force_sig_fault(SIGSEGV, SEGV_ACCADI, (void __user *)addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 0, 0x8, SIGTRAP) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) sun4v_data_access_exception(regs, addr, type_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) #include "pci_impl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /* When access exceptions happen, we must do this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) static void spitfire_clean_and_reenable_l1_caches(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) unsigned long va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) if (tlb_type != spitfire)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) /* Clean 'em. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) spitfire_put_icache_tag(va, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) spitfire_put_dcache_tag(va, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /* Re-enable in LSU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) __asm__ __volatile__("flush %%g6\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) "membar #Sync\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) "stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) LSU_CONTROL_IM | LSU_CONTROL_DM),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) "i" (ASI_LSU_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) : "memory");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) static void spitfire_enable_estate_errors(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) : "r" (ESTATE_ERR_ALL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) "i" (ASI_ESTATE_ERROR_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
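^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /* Map the 8-bit ECC syndrome read from a UDB error register to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * syndrome code that sprintf_dimm() expects when naming the failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * memory module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) */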
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) static char ecc_syndrome_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) static char *syndrome_unknown = "<Unknown>";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) unsigned short scode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) char memmod_str[64], *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (udbl & bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) scode = ecc_syndrome_table[udbl & 0xff];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) p = syndrome_unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) p = memmod_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) "Memory Module \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) smp_processor_id(), scode, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (udbh & bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) scode = ecc_syndrome_table[udbh & 0xff];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) p = syndrome_unknown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) p = memmod_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) "Memory Module \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) smp_processor_id(), scode, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) smp_processor_id(), afsr, afar, udbl, udbh, tl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) /* We always log it, even if someone is listening for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) notify_die(DIE_TRAP, "Correctable ECC Error", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 0, TRAP_TYPE_CEE, SIGTRAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /* The Correctable ECC Error trap does not disable I/D caches. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * we only have to restore the ESTATE Error Enable register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) spitfire_enable_estate_errors();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* XXX add more human friendly logging of the error status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) * XXX as is implemented for cheetah
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /* We always log it, even if someone is listening for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) notify_die(DIE_TRAP, "Uncorrectable Error", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 0, tt, SIGTRAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (tl1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) die_if_kernel("UE", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) /* XXX need more intelligent processing here, such as is implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * XXX for cheetah errors, in fact if the E-cache still holds the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * XXX line with bad parity this will loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) spitfire_clean_and_reenable_l1_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) spitfire_enable_estate_errors();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) force_sig_fault(SIGBUS, BUS_OBJERR, (void *)0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
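^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /* Entry point from the low-level spitfire error trap code. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * status_encoded argument packs the AFSR, the UDB high/low error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * status, the trap type and a TL>1 flag using the SFSTAT_* layout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * from asm/sfafsr.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) */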
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) unsigned long afsr, tt, udbh, udbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) int tl1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (tt == TRAP_TYPE_DAE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) spitfire_clean_and_reenable_l1_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) spitfire_enable_estate_errors();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) pci_poke_faulted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (afsr & SFAFSR_UE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (tt == TRAP_TYPE_CEE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /* Handle the case where we took a CEE trap, but ACK'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * only the UE state in the UDB error registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (afsr & SFAFSR_UE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) if (udbh & UDBE_CE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) __asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) "stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) : "r" (udbh & UDBE_CE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) "r" (0x0), "i" (ASI_UDB_ERROR_W));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (udbl & UDBE_CE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) __asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) "stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) : "r" (udbl & UDBE_CE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) "r" (0x18), "i" (ASI_UDB_ERROR_W));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) int cheetah_pcache_forced_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) void cheetah_enable_pcache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) unsigned long dcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) __asm__ __volatile__("ldxa [%%g0] %1, %0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) : "=r" (dcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) : "i" (ASI_DCU_CONTROL_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /* Cheetah error trap handling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) static unsigned long ecache_flush_physbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static unsigned long ecache_flush_linesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static unsigned long ecache_flush_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) /* This table is ordered in priority of errors and matches the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * AFAR overwrite policy as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct afsr_error_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) static const char CHAFSR_PERR_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) "System interface protocol error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static const char CHAFSR_IERR_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) "Internal processor error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) static const char CHAFSR_ISAP_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) "System request parity error on incoming address";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) static const char CHAFSR_UCU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) "Uncorrectable E-cache ECC error for ifetch/data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static const char CHAFSR_UCC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) "SW Correctable E-cache ECC error for ifetch/data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) static const char CHAFSR_UE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) "Uncorrectable system bus data ECC error for read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static const char CHAFSR_EDU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) "Uncorrectable E-cache ECC error for stmerge/blkld";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) static const char CHAFSR_EMU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) "Uncorrectable system bus MTAG error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static const char CHAFSR_WDU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) "Uncorrectable E-cache ECC error for writeback";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) static const char CHAFSR_CPU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) "Uncorrectable ECC error for copyout";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) static const char CHAFSR_CE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) "HW corrected system bus data ECC error for read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static const char CHAFSR_EDC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) "HW corrected E-cache ECC error for stmerge/blkld";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) static const char CHAFSR_EMC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) "HW corrected system bus MTAG ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) static const char CHAFSR_WDC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) "HW corrected E-cache ECC error for writeback";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static const char CHAFSR_CPC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) "HW corrected ECC error for copyout";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static const char CHAFSR_TO_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) "Unmapped error from system bus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static const char CHAFSR_BERR_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) "Bus error response from system bus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static const char CHAFSR_IVC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) "HW corrected system bus data ECC error for ivec read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) static const char CHAFSR_IVU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) "Uncorrectable system bus data ECC error for ivec read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) static struct afsr_error_table __cheetah_error_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) { CHAFSR_PERR, CHAFSR_PERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) { CHAFSR_IERR, CHAFSR_IERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) { CHAFSR_ISAP, CHAFSR_ISAP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) { CHAFSR_UCU, CHAFSR_UCU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) { CHAFSR_UCC, CHAFSR_UCC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) { CHAFSR_UE, CHAFSR_UE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) { CHAFSR_EDU, CHAFSR_EDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) { CHAFSR_EMU, CHAFSR_EMU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) { CHAFSR_WDU, CHAFSR_WDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) { CHAFSR_CPU, CHAFSR_CPU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) { CHAFSR_CE, CHAFSR_CE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) { CHAFSR_EDC, CHAFSR_EDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) { CHAFSR_EMC, CHAFSR_EMC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) { CHAFSR_WDC, CHAFSR_WDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) { CHAFSR_CPC, CHAFSR_CPC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) { CHAFSR_TO, CHAFSR_TO_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) { CHAFSR_BERR, CHAFSR_BERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /* These two do not update the AFAR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) { CHAFSR_IVC, CHAFSR_IVC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) { CHAFSR_IVU, CHAFSR_IVU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) { 0, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) static const char CHPAFSR_DTO_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) "System bus unmapped error for prefetch/storequeue-read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) static const char CHPAFSR_DBERR_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) "System bus error for prefetch/storequeue-read";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static const char CHPAFSR_THCE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) "Hardware corrected E-cache Tag ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) static const char CHPAFSR_TSCE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) "SW handled correctable E-cache Tag ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static const char CHPAFSR_TUE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) "Uncorrectable E-cache Tag ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) static const char CHPAFSR_DUE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) "System bus uncorrectable data ECC error due to prefetch/store-fill";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static struct afsr_error_table __cheetah_plus_error_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) { CHAFSR_PERR, CHAFSR_PERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) { CHAFSR_IERR, CHAFSR_IERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) { CHAFSR_ISAP, CHAFSR_ISAP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) { CHAFSR_UCU, CHAFSR_UCU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) { CHAFSR_UCC, CHAFSR_UCC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) { CHAFSR_UE, CHAFSR_UE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) { CHAFSR_EDU, CHAFSR_EDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) { CHAFSR_EMU, CHAFSR_EMU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) { CHAFSR_WDU, CHAFSR_WDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) { CHAFSR_CPU, CHAFSR_CPU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) { CHAFSR_CE, CHAFSR_CE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) { CHAFSR_EDC, CHAFSR_EDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) { CHAFSR_EMC, CHAFSR_EMC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) { CHAFSR_WDC, CHAFSR_WDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) { CHAFSR_CPC, CHAFSR_CPC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) { CHAFSR_TO, CHAFSR_TO_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) { CHAFSR_BERR, CHAFSR_BERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) { CHPAFSR_DTO, CHPAFSR_DTO_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) { CHPAFSR_THCE, CHPAFSR_THCE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) { CHPAFSR_TUE, CHPAFSR_TUE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) { CHPAFSR_DUE, CHPAFSR_DUE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* These two do not update the AFAR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) { CHAFSR_IVC, CHAFSR_IVC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) { CHAFSR_IVU, CHAFSR_IVU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) { 0, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) static const char JPAFSR_JETO_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) "System interface protocol error, hw timeout caused";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static const char JPAFSR_SCE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) "Parity error on system snoop results";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static const char JPAFSR_JEIC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) "System interface protocol error, illegal command detected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static const char JPAFSR_JEIT_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) "System interface protocol error, illegal ADTYPE detected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static const char JPAFSR_OM_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) "Out of range memory error has occurred";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) static const char JPAFSR_ETP_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) "Parity error on L2 cache tag SRAM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) static const char JPAFSR_UMS_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) "Error due to unsupported store";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static const char JPAFSR_RUE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) "Uncorrectable ECC error from remote cache/memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static const char JPAFSR_RCE_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) "Correctable ECC error from remote cache/memory";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static const char JPAFSR_BP_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) "JBUS parity error on returned read data";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) static const char JPAFSR_WBP_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) "JBUS parity error on data for writeback or block store";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static const char JPAFSR_FRC_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) "Foreign read to DRAM incurring correctable ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static const char JPAFSR_FRU_msg[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) "Foreign read to DRAM incurring uncorrectable ECC error";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static struct afsr_error_table __jalapeno_error_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) { JPAFSR_JETO, JPAFSR_JETO_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) { JPAFSR_SCE, JPAFSR_SCE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) { JPAFSR_JEIC, JPAFSR_JEIC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) { JPAFSR_JEIT, JPAFSR_JEIT_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) { CHAFSR_PERR, CHAFSR_PERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) { CHAFSR_IERR, CHAFSR_IERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) { CHAFSR_ISAP, CHAFSR_ISAP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) { CHAFSR_UCU, CHAFSR_UCU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) { CHAFSR_UCC, CHAFSR_UCC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) { CHAFSR_UE, CHAFSR_UE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) { CHAFSR_EDU, CHAFSR_EDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) { JPAFSR_OM, JPAFSR_OM_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) { CHAFSR_WDU, CHAFSR_WDU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) { CHAFSR_CPU, CHAFSR_CPU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) { CHAFSR_CE, CHAFSR_CE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) { CHAFSR_EDC, CHAFSR_EDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) { JPAFSR_ETP, JPAFSR_ETP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) { CHAFSR_WDC, CHAFSR_WDC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) { CHAFSR_CPC, CHAFSR_CPC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) { CHAFSR_TO, CHAFSR_TO_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) { CHAFSR_BERR, CHAFSR_BERR_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) { JPAFSR_UMS, JPAFSR_UMS_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) { JPAFSR_RUE, JPAFSR_RUE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) { JPAFSR_RCE, JPAFSR_RCE_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) { JPAFSR_BP, JPAFSR_BP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) { JPAFSR_WBP, JPAFSR_WBP_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) { JPAFSR_FRC, JPAFSR_FRC_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) { JPAFSR_FRU, JPAFSR_FRU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /* This one does not update the AFAR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) { CHAFSR_IVU, CHAFSR_IVU_msg },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) { 0, NULL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static struct afsr_error_table *cheetah_error_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static unsigned long cheetah_afsr_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct cheetah_err_info *cheetah_error_log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
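/* Each cpu owns two consecutive slots in cheetah_error_log: the first
 * for traps taken at TL0 and the second for traps taken at TL > 0,
 * as indicated by CHAFSR_TL1 in the saved AFSR.
 */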
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct cheetah_err_info *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (!cheetah_error_log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) p = cheetah_error_log + (cpu * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if ((afsr & CHAFSR_TL1) != 0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) extern unsigned int tl0_icpe[], tl1_icpe[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) extern unsigned int tl0_dcpe[], tl1_dcpe[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) extern unsigned int tl0_fecc[], tl1_fecc[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) extern unsigned int tl0_cee[], tl1_cee[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) extern unsigned int tl0_iae[], tl1_iae[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) extern unsigned int tl0_dae[], tl1_dae[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) void __init cheetah_ecache_flush_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) unsigned long largest_size, smallest_linesize, order, ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int i, sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Scan all cpu device tree nodes, note two values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * 1) largest E-cache size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * 2) smallest E-cache line size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) largest_size = 0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) smallest_linesize = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) for (i = 0; i < NR_CPUS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) val = cpu_data(i).ecache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (val > largest_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) largest_size = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) val = cpu_data(i).ecache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (val < smallest_linesize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) smallest_linesize = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (largest_size == 0UL || smallest_linesize == ~0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) "parameters.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ecache_flush_size = (2 * largest_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ecache_flush_linesize = smallest_linesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (ecache_flush_physbase == ~0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) "contiguous physical memory.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ecache_flush_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* Now allocate error trap reporting scoreboard. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (order = 0; order < MAX_ORDER; order++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if ((PAGE_SIZE << order) >= sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) cheetah_error_log = (struct cheetah_err_info *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) __get_free_pages(GFP_KERNEL, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (!cheetah_error_log) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) prom_printf("cheetah_ecache_flush_init: Failed to allocate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) "error logging scoreboard (%d bytes).\n", sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) memset(cheetah_error_log, 0, PAGE_SIZE << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Mark all AFSRs as invalid so that the trap handler will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * log new information there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) for (i = 0; i < 2 * NR_CPUS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) cheetah_error_log[i].afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) __asm__ ("rdpr %%ver, %0" : "=r" (ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if ((ver >> 32) == __JALAPENO_ID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) (ver >> 32) == __SERRANO_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) cheetah_error_table = &__jalapeno_error_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) cheetah_afsr_errors = JPAFSR_ERRORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) } else if ((ver >> 32) == 0x003e0015) { /* Cheetah+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cheetah_error_table = &__cheetah_plus_error_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) cheetah_afsr_errors = CHPAFSR_ERRORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) cheetah_error_table = &__cheetah_error_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) cheetah_afsr_errors = CHAFSR_ERRORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* Now patch trap tables. */
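/* Each trap table entry is eight instructions, i.e. 8 * 4 == 32 bytes. */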
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (tlb_type == cheetah_plus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) flushi(PAGE_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
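/* Displacement-flush the entire E-cache by reading ecache_flush_size
 * bytes (twice the largest E-cache found at boot) of the reserved
 * physical span through ASI_PHYS_USE_EC, discarding the data into %g0.
 */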
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static void cheetah_flush_ecache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) unsigned long flush_base = ecache_flush_physbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) unsigned long flush_linesize = ecache_flush_linesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) unsigned long flush_size = ecache_flush_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) " bne,pt %%xcc, 1b\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) " ldxa [%2 + %0] %3, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) : "=&r" (flush_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) : "0" (flush_size), "r" (flush_base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
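/* Push a single E-cache line out by loading the two addresses inside
 * the flush span that alias to the same E-cache index as PHYSADDR.
 */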
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static void cheetah_flush_ecache_line(unsigned long physaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) unsigned long alias;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) physaddr &= ~(8UL - 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) physaddr = (ecache_flush_physbase +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) alias = physaddr + (ecache_flush_size >> 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) "ldxa [%1] %2, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) : "r" (physaddr), "r" (alias),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) "i" (ASI_PHYS_USE_EC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Unfortunately, the diagnostic access to the I-cache tags we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * use to clear the thing interferes with I-cache coherency transactions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * So we must only flush the I-cache when it is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void __cheetah_flush_icache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) unsigned int icache_size, icache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) icache_size = local_cpu_data().icache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) icache_line_size = local_cpu_data().icache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Clear the valid bits in all the tags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) for (addr = 0; addr < icache_size; addr += icache_line_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) : "r" (addr | (2 << 3)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) "i" (ASI_IC_TAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) static void cheetah_flush_icache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) unsigned long dcu_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Save current DCU, disable I-cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) "or %0, %2, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) "stxa %%g1, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) : "=r" (dcu_save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) __cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Restore DCU register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
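/* Invalidate the local D-cache by writing a zero tag to every line
 * index via the ASI_DCACHE_TAG diagnostic ASI.
 */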
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static void cheetah_flush_dcache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) unsigned int dcache_size, dcache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dcache_size = local_cpu_data().dcache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) dcache_line_size = local_cpu_data().dcache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) : "r" (addr), "i" (ASI_DCACHE_TAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* In order to make the even parity correct we must do two things.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * First, we clear DC_data_parity and set DC_utag to an appropriate value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * Next, we clear out all 32-bytes of data for that line. Data of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * all-zero + tag parity value of zero == correct parity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static void cheetah_plus_zap_dcache_parity(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) unsigned int dcache_size, dcache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dcache_size = local_cpu_data().dcache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dcache_line_size = local_cpu_data().dcache_line_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) unsigned long tag = (addr >> 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) unsigned long line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) __asm__ __volatile__("membar #Sync\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) "stxa %0, [%1] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) : "r" (tag), "r" (addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "i" (ASI_DCACHE_UTAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) for (line = addr; line < addr + dcache_line_size; line += 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) __asm__ __volatile__("membar #Sync\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) "stxa %%g0, [%0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) : "r" (line),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) "i" (ASI_DCACHE_DATA));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Conversion tables used to frob Cheetah AFSR syndrome values into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * something palatable to the memory controller driver get_unumber
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) #define MT0 137
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #define MT1 138
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #define MT2 139
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) #define NONE 254
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) #define MTC0 140
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #define MTC1 141
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #define MTC2 142
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #define MTC3 143
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #define C0 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #define C1 129
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #define C2 130
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) #define C3 131
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) #define C4 132
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define C5 133
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #define C6 134
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) #define C7 135
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #define C8 136
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #define M2 144
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #define M3 145
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #define M4 146
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) #define M 147
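/* In the tables below, values 0-127 name the single data bit in error,
 * C0-C8 name ECC check bits, MT0-MT2 and MTC0-MTC3 name mtag data and
 * check bits, the M* values mark multi-bit syndromes, and NONE means
 * no error is indicated.
 */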
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static unsigned char cheetah_ecc_syntab[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static unsigned char cheetah_mtag_syntab[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) NONE, MTC0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) MTC1, NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) MTC2, NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) NONE, MT0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) MTC3, NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) NONE, MT1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) NONE, MT2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) NONE, NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /* Return the highest priority error condition mentioned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static inline unsigned long cheetah_get_hipri(unsigned long afsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) unsigned long tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) for (i = 0; cheetah_error_table[i].mask; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static const char *cheetah_get_string(unsigned long bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) for (i = 0; cheetah_error_table[i].mask; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if ((bit & cheetah_error_table[i].mask) != 0UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return cheetah_error_table[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return "???";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
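/* Print a full report of the error: AFSR/AFAR, trap state, the decoded
 * syndromes (including the DIMM name when the memory controller driver
 * can resolve one), the D-/I-/E-cache diagnostic snapshots, and any
 * additional error bits beyond the highest-priority one.
 */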
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) unsigned long afsr, unsigned long afar, int recoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) unsigned long hipri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) char unum[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) afsr, afar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) (afsr & CHAFSR_TL1) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) printk("%s" "ERROR(%d): ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) printk("TPC<%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) hipri = cheetah_get_hipri(afsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) hipri, cheetah_get_string(hipri));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* Try to get unumber if relevant. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) CHAFSR_CPC | CHAFSR_CPU | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) CHAFSR_UE | CHAFSR_CE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) CHAFSR_EDC | CHAFSR_EDU | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) CHAFSR_UCC | CHAFSR_UCU | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) CHAFSR_WDU | CHAFSR_WDC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (afsr & ESYND_ERRORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int syndrome;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) syndrome = cheetah_ecc_syntab[syndrome];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (ret != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) (recoverable ? KERN_WARNING : KERN_CRIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) smp_processor_id(), unum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) } else if (afsr & MSYND_ERRORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int syndrome;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) syndrome = cheetah_mtag_syntab[syndrome];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (ret != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) (recoverable ? KERN_WARNING : KERN_CRIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) smp_processor_id(), unum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* Now dump the cache snapshots. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) (int) info->dcache_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) info->dcache_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) info->dcache_utag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) info->dcache_stag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) info->dcache_data[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) info->dcache_data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) info->dcache_data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) info->dcache_data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) "u[%016llx] l[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) (int) info->icache_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) info->icache_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) info->icache_utag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) info->icache_stag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) info->icache_upper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) info->icache_lower);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) info->icache_data[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) info->icache_data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) info->icache_data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) info->icache_data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) info->icache_data[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) info->icache_data[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) info->icache_data[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) info->icache_data[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) (int) info->ecache_index, info->ecache_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) info->ecache_data[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) info->ecache_data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) info->ecache_data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) info->ecache_data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) afsr = (afsr & ~hipri) & cheetah_afsr_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) while (afsr != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) unsigned long bit = cheetah_get_hipri(afsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) (recoverable ? KERN_WARNING : KERN_CRIT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) bit, cheetah_get_string(bit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) afsr &= ~bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (!recoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
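/* Re-read the AFSR and return non-zero if any of the error bits we care
 * about are set, capturing AFSR/AFAR into *logp when one is provided.
 * Writing the value back to the AFSR clears the sticky error bits.
 */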
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static int cheetah_recheck_errors(struct cheetah_err_info *logp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) unsigned long afsr, afar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) : "=r" (afsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) : "i" (ASI_AFSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if ((afsr & cheetah_afsr_errors) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (logp != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) : "=r" (afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) : "i" (ASI_AFAR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) logp->afsr = afsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) logp->afar = afar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) "membar #Sync\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) : : "r" (afsr), "i" (ASI_AFSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
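/* Handle a Fast-ECC error trap.  The trap vector has already captured
 * the AFSR/AFAR and cache diagnostic state into the per-cpu log and
 * disabled the D-/I-caches and error reporting, which we re-enable
 * here once the caches have been flushed.
 */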
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct cheetah_err_info local_snapshot, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int recoverable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* Flush E-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) cheetah_flush_ecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) p = cheetah_get_error_log(afsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) afsr, afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* Grab snapshot of logged error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) memcpy(&local_snapshot, p, sizeof(local_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /* If the current trap snapshot does not match what the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * trap handler passed along into our args, big trouble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * In such a case, mark the local copy as invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * Else, it matches and we mark the afsr in the non-local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * copy as invalid so we may log new error traps there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (p->afsr != afsr || p->afar != afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) local_snapshot.afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) p->afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) cheetah_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* Re-enable I-cache/D-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) : "i" (ASI_DCU_CONTROL_REG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) "i" (DCU_DC | DCU_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) /* Re-enable error reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) : "i" (ASI_ESTATE_ERROR_EN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Decide if we can continue after handling this trap and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * logging the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) recoverable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* Re-check AFSR/AFAR. What we are looking for here is whether a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * error was logged while we had error reporting traps disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (cheetah_recheck_errors(&local_snapshot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) unsigned long new_afsr = local_snapshot.afsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /* If we got a new asynchronous error, die... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) CHAFSR_WDU | CHAFSR_CPU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) CHAFSR_IVU | CHAFSR_UE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) CHAFSR_BERR | CHAFSR_TO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* Log errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!recoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) panic("Irrecoverable Fast-ECC error trap.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* Flush E-cache to kick the error trap handlers out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) cheetah_flush_ecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) /* Try to fix a correctable error by pushing the line out from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * the E-cache. Recheck error reporting registers to see if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * problem is intermittent.
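* Returns 0 if no new error shows up (the problem was intermittent),
* 1 if a new error was seen but cleared after one more displacement,
* and 2 if the error persists.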
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static int cheetah_fix_ce(unsigned long physaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unsigned long orig_estate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) unsigned long alias1, alias2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Make sure correctable error traps are disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) "andn %0, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) "stxa %%g1, [%%g0] %2\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) : "=&r" (orig_estate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) : "i" (ESTATE_ERROR_CEEN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) "i" (ASI_ESTATE_ERROR_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /* We calculate alias addresses that will force the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * cache line in question out of the E-cache. Then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * we bring it back in with an atomic instruction so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * that we get it in some modified/exclusive state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * then we displace it again to try and get proper ECC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * pushed back into the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) physaddr &= ~(8UL - 1UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) alias1 = (ecache_flush_physbase +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) (physaddr & ((ecache_flush_size >> 1) - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) alias2 = alias1 + (ecache_flush_size >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) "ldxa [%1] %3, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) "casxa [%2] %3, %%g0, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) "ldxa [%0] %3, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) "ldxa [%1] %3, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) : "r" (alias1), "r" (alias2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) "r" (physaddr), "i" (ASI_PHYS_USE_EC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* Did that trigger another error? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (cheetah_recheck_errors(NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* Try one more time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (cheetah_recheck_errors(NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) ret = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* No new error, intermittent problem. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /* Restore error enables. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /* Return non-zero if PADDR is a valid physical memory address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static int cheetah_check_main_memory(unsigned long paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) unsigned long vaddr = PAGE_OFFSET + paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (vaddr > (unsigned long) high_memory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) return kern_addr_valid(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
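/* Handle a correctable ECC error trap.  The trap vector logged the
 * AFSR/AFAR and disabled the I-cache and correctable error reporting;
 * if the faulting address is main memory we attempt to scrub it with
 * cheetah_fix_ce(), flush the affected caches, and then re-enable the
 * I-cache and CEEN.
 */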
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct cheetah_err_info local_snapshot, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) int recoverable, is_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) p = cheetah_get_error_log(afsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) afsr, afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) /* Grab snapshot of logged error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) memcpy(&local_snapshot, p, sizeof(local_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* If the current trap snapshot does not match what the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * trap handler passed along into our args, big trouble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * In such a case, mark the local copy as invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * Else, it matches and we mark the afsr in the non-local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * copy as invalid so we may log new error traps there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (p->afsr != afsr || p->afar != afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) local_snapshot.afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) p->afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) is_memory = cheetah_check_main_memory(afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* XXX Might want to log the results of this operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * XXX somewhere... -DaveM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) cheetah_fix_ce(afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int flush_all, flush_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) flush_all = flush_line = 0;
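/* If the indicated E-cache error (EDC or CPC) is the only error
 * bit recorded in the AFSR, flushing the single E-cache line at
 * AFAR is sufficient; otherwise flush the entire E-cache.
 */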
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if ((afsr & CHAFSR_EDC) != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) flush_line = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) flush_all = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) } else if ((afsr & CHAFSR_CPC) != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) flush_line = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) flush_all = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /* Trap handler only disabled I-cache, flush it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /* Re-enable I-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) : "i" (ASI_DCU_CONTROL_REG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) "i" (DCU_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (flush_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) cheetah_flush_ecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) else if (flush_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) cheetah_flush_ecache_line(afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /* Re-enable error reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) : "i" (ASI_ESTATE_ERROR_EN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) "i" (ESTATE_ERROR_CEEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* Decide if we can continue after handling this trap and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * logging the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) recoverable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* Re-check AFSR/AFAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) (void) cheetah_recheck_errors(&local_snapshot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* Log errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) if (!recoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) panic("Irrecoverable Correctable-ECC error trap.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
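/* Handle a deferred (asynchronous) error trap.  Deferred errors are
 * imprecise: the faulting access may have retired long ago, so we only
 * continue when AFAR points at valid main memory and we either trapped
 * from user mode or the kernel access is covered by an exception table
 * entry; anything else is fatal.
 */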
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct cheetah_err_info local_snapshot, *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) int recoverable, is_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* Check for the special PCI poke sequence. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) cheetah_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) /* Re-enable I-cache/D-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) : "i" (ASI_DCU_CONTROL_REG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) "i" (DCU_DC | DCU_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) /* Re-enable error reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) : "i" (ASI_ESTATE_ERROR_EN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) (void) cheetah_recheck_errors(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) pci_poke_faulted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) regs->tpc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) p = cheetah_get_error_log(afsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) afsr, afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* Grab snapshot of logged error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) memcpy(&local_snapshot, p, sizeof(local_snapshot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* If the current trap snapshot does not match what the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * trap handler passed along into our args, big trouble.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * In such a case, mark the local copy as invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * Else, it matches and we mark the afsr in the non-local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * copy as invalid so we may log new error traps there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (p->afsr != afsr || p->afar != afar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) local_snapshot.afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) p->afsr = CHAFSR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) is_memory = cheetah_check_main_memory(afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int flush_all, flush_line;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) flush_all = flush_line = 0;
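/* Same E-cache flush policy as in the CEE handler above: flush
 * only the line at AFAR if EDU or BERR is the sole recorded error
 * bit, otherwise flush the whole E-cache.
 */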
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if ((afsr & CHAFSR_EDU) != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) flush_line = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) flush_all = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) } else if ((afsr & CHAFSR_BERR) != 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) flush_line = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) flush_all = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) cheetah_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /* Re-enable I/D caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) : "i" (ASI_DCU_CONTROL_REG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) "i" (DCU_IC | DCU_DC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (flush_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) cheetah_flush_ecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) else if (flush_line)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) cheetah_flush_ecache_line(afar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /* Re-enable error reporting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) : "i" (ASI_ESTATE_ERROR_EN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /* Decide if we can continue after handling this trap and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * logging the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) recoverable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Re-check AFSR/AFAR. What we are looking for here is whether a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * error was logged while we had error reporting traps disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (cheetah_recheck_errors(&local_snapshot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) unsigned long new_afsr = local_snapshot.afsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /* If we got a new asynchronous error, die... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) CHAFSR_WDU | CHAFSR_CPU |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) CHAFSR_IVU | CHAFSR_UE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) CHAFSR_BERR | CHAFSR_TO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /* Log errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* "Recoverable" here means we try to yank the page from ever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * being newly used again. This depends upon a few things:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * 1) Must be main memory, and AFAR must be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * 2) If we trapped from user, OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * 3) Else, if we trapped from kernel we must find exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * table entry (i.e. we must have been accessing user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * space).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * If AFAR is not in main memory, or we trapped from kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * and cannot find an exception table entry, it is unacceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * to try and continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (recoverable && is_memory) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if ((regs->tstate & TSTATE_PRIV) == 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* OK, usermode access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) recoverable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* OK, kernel access to userspace. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) recoverable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /* BAD, privileged state is corrupted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (recoverable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) if (pfn_valid(afar >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) get_page(pfn_to_page(afar >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /* Only perform fixup if we still have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * recoverable condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (recoverable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) recoverable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (!recoverable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) panic("Irrecoverable deferred error trap.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /* Handle a D/I cache parity error trap. TYPE is encoded as:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * Bit0: 0=dcache,1=icache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * Bit1: 0=recoverable,1=unrecoverable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * The hardware has disabled both the I-cache and D-cache in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * the %dcr register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) void cheetah_plus_parity_error(int type, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (type & 0x1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) __cheetah_flush_icache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) cheetah_plus_zap_dcache_parity();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) cheetah_flush_dcache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* Re-enable I-cache/D-cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) "or %%g1, %1, %%g1\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) "stxa %%g1, [%%g0] %0\n\t"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) "membar #Sync"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) : /* no outputs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) : "i" (ASI_DCU_CONTROL_REG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) "i" (DCU_DC | DCU_IC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) : "g1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (type & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) (type & 0x1) ? 'I' : 'D',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) panic("Irrecoverable Cheetah+ parity error.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) smp_processor_id(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) (type & 0x1) ? 'I' : 'D',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
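/* Layout of a sun4v error report queue entry as delivered by the
 * hypervisor; the 0xNN comments give each field's byte offset within
 * the entry.
 */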
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct sun4v_error_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /* Unique error handle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /*0x00*/u64 err_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /* %stick value at the time of the error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /*0x08*/u64 err_stick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /*0x10*/u8 reserved_1[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /* Error type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /*0x13*/u8 err_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) #define SUN4V_ERR_TYPE_UNDEFINED 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) #define SUN4V_ERR_TYPE_SHUTDOWN_RQST 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) #define SUN4V_ERR_TYPE_DUMP_CORE 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) #define SUN4V_ERR_TYPE_SP_STATE_CHANGE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) #define SUN4V_ERR_TYPE_NUM 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* Error attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /*0x14*/u32 err_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) #define SUN4V_ERR_ATTRS_PIO 0x00000004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) #define SUN4V_ERR_ATTRS_SHUTDOWN_RQST 0x00000020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) #define SUN4V_ERR_ATTRS_ASR 0x00000040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) #define SUN4V_ERR_ATTRS_ASI 0x00000080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) #define SUN4V_ERR_ATTRS_PRIV_REG 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) #define SUN4V_ERR_ATTRS_SPSTATE_MSK 0x00000600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) #define SUN4V_ERR_ATTRS_MCD 0x00000800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) #define SUN4V_ERR_ATTRS_SPSTATE_SHFT 9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) #define SUN4V_ERR_ATTRS_MODE_MSK 0x03000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) #define SUN4V_ERR_ATTRS_MODE_SHFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) #define SUN4V_ERR_SPSTATE_FAULTED 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) #define SUN4V_ERR_SPSTATE_AVAILABLE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) #define SUN4V_ERR_SPSTATE_NOT_PRESENT 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) #define SUN4V_ERR_MODE_USER 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) #define SUN4V_ERR_MODE_PRIV 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* Real address of the memory region or PIO transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /*0x18*/u64 err_raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /* Size of the operation triggering the error, in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /*0x20*/u32 err_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* ID of the CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /*0x24*/u16 err_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) /* Grace period for shutdown, in seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /*0x26*/u16 err_secs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* Value of the %asi register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /*0x28*/u8 err_asi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) /*0x29*/u8 reserved_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /* ASR register number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*0x2a*/u16 err_asr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) #define SUN4V_ERR_ASR_VALID 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /*0x2c*/u32 reserved_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /*0x30*/u64 reserved_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /*0x38*/u64 reserved_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static const char *sun4v_err_type_to_str(u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static const char *types[SUN4V_ERR_TYPE_NUM] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) "undefined",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) "uncorrected resumable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) "precise nonresumable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) "deferred nonresumable",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) "shutdown request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) "dump core",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) "SP state change",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (type < SUN4V_ERR_TYPE_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return types[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
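/* Decode the err_attrs word of an error report: print a name for each
 * attribute bit that is set, followed by the SP state and mode fields.
 */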
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) static void sun4v_emit_err_attr_strings(u32 attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static const char *attr_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) "processor",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) "memory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) "PIO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) "int-registers",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) "fpu-registers",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) "shutdown-request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) "ASR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) "ASI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) "priv-reg",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static const char *sp_states[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) "sp-faulted",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) "sp-available",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) "sp-not-present",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) "sp-state-reserved",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static const char *modes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) "mode-reserved0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) "user",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) "priv",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) "mode-reserved1",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) u32 sp_state, mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (attrs & (1U << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) const char *s = attr_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) pr_cont("%s ", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) SUN4V_ERR_ATTRS_SPSTATE_SHFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) pr_cont("%s ", sp_states[sp_state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) SUN4V_ERR_ATTRS_MODE_SHFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) pr_cont("%s ", modes[mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) pr_cont("res-queue-full ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /* When the report contains a real-address of "-1" it means that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) * hardware did not provide the address. So we compute the effective
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * address of the load or store instruction at regs->tpc and report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * that. Usually when this happens it's a PIO and in such a case we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * are using physical addresses with bypass ASIs anyway, so what we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * report here is exactly what we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) unsigned int insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) u64 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (!(regs->tstate & TSTATE_PRIV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) insn = *(unsigned int *) regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) addr = compute_effective_address(regs, insn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) printk("%s: insn effective address [0x%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) pfx, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
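/* Dump one error report entry to the kernel log, first as raw 64-bit
 * words and then field by field, show the trapping registers, and
 * report (and reset) any overflow count accumulated by the overflow
 * handlers below.
 */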
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) int cpu, const char *pfx, atomic_t *ocnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) u64 *raw_ptr = (u64 *) ent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) u32 attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) printk("%s: Reporting on cpu %d\n", pfx, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) printk("%s: TPC [0x%016lx] <%pS>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) pfx, regs->tpc, (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) printk("%s: %016llx:%016llx:%016llx:%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) pfx, ent->err_handle, ent->err_stick);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) attrs = ent->err_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) printk("%s: attrs [0x%08x] < ", pfx, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) sun4v_emit_err_attr_strings(attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) pr_cont(">\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /* Various fields in the error report are only valid if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * certain attribute bits are set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) SUN4V_ERR_ATTRS_PIO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) SUN4V_ERR_ATTRS_ASI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (ent->err_raddr == ~(u64)0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) sun4v_report_real_raddr(pfx, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) printk("%s: size [0x%x]\n", pfx, ent->err_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) SUN4V_ERR_ATTRS_INT_REGISTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) SUN4V_ERR_ATTRS_FPU_REGISTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) SUN4V_ERR_ATTRS_PRIV_REG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) if (attrs & SUN4V_ERR_ATTRS_ASI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) SUN4V_ERR_ATTRS_FPU_REGISTERS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) SUN4V_ERR_ATTRS_PRIV_REG)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) printk("%s: reg [0x%04x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if ((cnt = atomic_read(ocnt)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) atomic_set(ocnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) printk("%s: Queue overflowed %d times.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) pfx, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* Handle a memory corruption detected (MCD) error which is vectored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * in through the resumable error trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (notify_die(DIE_TRAP, "MCD error", regs, 0, 0x34,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) SIGSEGV) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /* An MCD exception can happen because the task was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * running a system call with MCD enabled and passed a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * non-versioned pointer, or a pointer with a bad version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * tag, to the system call. In such cases the hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * places the address of the offending instruction in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * resumable error report. This is a deferred error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * so the read/write that caused the trap may have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * retired long ago and we may have no choice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * but to send SIGSEGV to the process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /* Looks like a bad syscall parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #ifdef DEBUG_EXCEPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ent.err_raddr, entry->fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Send SIGSEGV to the userspace process with the right signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * Log the event and clear the first word of the entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct sun4v_error_entry *ent, local_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct trap_per_cpu *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) tb = &trap_block[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) paddr = tb->resum_kernel_buf_pa + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ent = __va(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* We have a local copy now, so release the entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ent->err_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* We should really take the seconds field of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * the error report and use it for the shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * invocation, but for now do the same thing we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * do for a DS shutdown request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) pr_info("Shutdown request, %u seconds...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) local_copy.err_secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) orderly_poweroff(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /* If this is a memory corruption detected (MCD) error vectored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) * by the HV through the resumable error trap, call the MCD handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (local_copy.err_attrs & SUN4V_ERR_ATTRS_MCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) do_mcd_err(regs, local_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sun4v_log_error(regs, &local_copy, cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) KERN_ERR "RESUMABLE ERROR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) &sun4v_resum_oflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /* If we try to printk() we'll probably make matters worse, by trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * to retake locks this cpu already holds or causing more errors. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * just bump a counter, and we'll report these counter bumps above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) void sun4v_resum_overflow(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) atomic_inc(&sun4v_resum_oflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* Given a set of registers, get the virtual address that was being accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * by the faulting instruction at tpc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) unsigned int insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) return compute_effective_address(regs, insn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) (insn >> 25) & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* Attempt to handle non-resumable errors generated from userspace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * Returns true if the signal was handled, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct sun4v_error_entry *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) unsigned int attrs = ent->err_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) unsigned long addr = ent->err_raddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (addr == ~(u64)0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /* This seems highly unlikely to ever occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /* Break the unfortunate news. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) page_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
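/* Take an extra reference on every affected page so the failed
 * memory is never handed out again.
 */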
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) while (page_cnt-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (pfn_valid(addr >> PAGE_SHIFT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) get_page(pfn_to_page(addr >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) addr += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) force_sig(SIGKILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (attrs & SUN4V_ERR_ATTRS_PIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) force_sig_fault(SIGBUS, BUS_ADRERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) (void __user *)sun4v_get_vaddr(regs), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) /* Default to doing nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) * Log the event, clear the first word of the entry, and die.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct sun4v_error_entry *ent, local_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct trap_per_cpu *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) tb = &trap_block[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) paddr = tb->nonresum_kernel_buf_pa + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) ent = __va(paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) /* We have a local copy now, so release the entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ent->err_handle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (!(regs->tstate & TSTATE_PRIV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) sun4v_nonresum_error_user_handled(regs, &local_copy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /* DON'T PANIC: This userspace error was handled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) #ifdef CONFIG_PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) /* Check for the special PCI poke sequence. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (pci_poke_in_progress && pci_poke_cpu == cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) pci_poke_faulted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) regs->tpc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) sun4v_log_error(regs, &local_copy, cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) KERN_EMERG "NON-RESUMABLE ERROR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) &sun4v_nonresum_oflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) panic("Non-resumable error.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /* If we try to printk() we'll probably make matters worse, by trying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * to retake locks this cpu already holds or causing more errors. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * just bump a counter, and we'll report these counter bumps above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) void sun4v_nonresum_overflow(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) /* XXX Actually even this may not make much sense. Perhaps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * XXX we should just pull the plug and panic directly from here?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) atomic_inc(&sun4v_nonresum_oflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
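/* Common tail for the ITLB/DTLB error reports below; the error is
 * fatal if it occurred in privileged mode.
 */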
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static void sun4v_tlb_error(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) die_if_kernel("TLB/TSB error", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) unsigned long sun4v_err_itlb_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) unsigned long sun4v_err_itlb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) unsigned long sun4v_err_itlb_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) unsigned long sun4v_err_itlb_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) regs->tpc, tl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) (void *) regs->u_regs[UREG_I7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) "pte[%lx] error[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) sun4v_err_itlb_pte, sun4v_err_itlb_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) sun4v_tlb_error(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) unsigned long sun4v_err_dtlb_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) unsigned long sun4v_err_dtlb_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) unsigned long sun4v_err_dtlb_pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) unsigned long sun4v_err_dtlb_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) regs->tpc, tl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) (void *) regs->u_regs[UREG_I7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) "pte[%lx] error[%lx]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) sun4v_tlb_error(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) void hypervisor_tlbop_error(unsigned long err, unsigned long op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) err, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) err, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
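/* Common tail of the FP exception handlers.  In kernel mode the faulting
 * instruction is simply skipped.  In user mode the saved %fsr is decoded
 * into a SIGFPE si_code: bits 14-16 hold the FP trap type (ftt), value 1
 * being IEEE_754_exception, and the cexc (current exception) bits then pick
 * invalid/overflow/underflow/divide-by-zero/inexact; anything else is
 * reported as FPE_FLTUNK.
 */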
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) static void do_fpe_common(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) regs->tpc = regs->tnpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) regs->tnpc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) unsigned long fsr = current_thread_info()->xfsr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) int code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) code = FPE_FLTUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if ((fsr & 0x1c000) == (1 << 14)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (fsr & 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) code = FPE_FLTINV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) else if (fsr & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) code = FPE_FLTOVF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) else if (fsr & 0x04)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) code = FPE_FLTUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) else if (fsr & 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) code = FPE_FLTDIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) else if (fsr & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) code = FPE_FLTRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) force_sig_fault(SIGFPE, code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) (void __user *)regs->tpc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) void do_fpieee(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 0, 0x24, SIGFPE) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) do_fpe_common(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
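/* fp_exception_other (trap 0x25): unfinished_FPop and unimplemented_FPop
 * trap types are first handed to the software emulator; whatever cannot be
 * emulated falls through to the common SIGFPE path above.
 */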
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) void do_fpother(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) struct fpustate *f = FPUSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (notify_die(DIE_TRAP, "fpu exception other", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 0, 0x25, SIGFPE) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) case (2 << 14): /* unfinished_FPop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) case (3 << 14): /* unimplemented_FPop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) ret = do_mathemu(regs, f, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) do_fpe_common(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) void do_tof(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 0, 0x26, SIGEMT) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (regs->tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) die_if_kernel("Penguin overflow trap from kernel mode", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) force_sig_fault(SIGEMT, EMT_TAGOVF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) (void __user *)regs->tpc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) void do_div0(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) if (notify_die(DIE_TRAP, "integer division by zero", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 0, 0x28, SIGFPE) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (regs->tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) die_if_kernel("TL0: Kernel divide by zero.", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) force_sig_fault(SIGFPE, FPE_INTDIV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) (void __user *)regs->tpc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
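/* Dump the three instructions before and five after the faulting kernel PC,
 * bracketing the faulting one in <>; bail out on a misaligned PC.
 */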
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) static void instruction_dump(unsigned int *pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) if ((((unsigned long) pc) & 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) printk("Instruction DUMP:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) for (i = -3; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
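/* Same thing for a user-space PC: the nine surrounding words are fetched
 * with copy_from_user(), so an unreadable address simply skips the dump.
 */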
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) static void user_instruction_dump(unsigned int __user *pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) unsigned int buf[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) if ((((unsigned long) pc) & 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (copy_from_user(buf, pc - 3, sizeof(buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) printk("Instruction DUMP:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) for (i = 0; i < 9; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
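/* Print a kernel call trace.  Starts from the supplied stack pointer, or
 * from the current %fp (resp. the task's saved ksp) when none is given,
 * flushes the register windows if dumping the running task, and walks at
 * most 16 frames, stopping at the first invalid one.  Trap frames on the
 * stack are recognised and the walk continues from the frame pointer saved
 * in them, stopping once a user-mode trap frame is reached; with the
 * function graph tracer, return addresses that point at return_to_handler
 * are translated back to the original caller.
 */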
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) unsigned long fp, ksp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) struct thread_info *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) int graph = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ksp = (unsigned long) _ksp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (!tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) tp = task_thread_info(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (ksp == 0UL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (tsk == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) asm("mov %%fp, %0" : "=r" (ksp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) ksp = tp->ksp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (tp == current_thread_info())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) flushw_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) fp = ksp + STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) printk("%sCall Trace:\n", loglvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) struct sparc_stackf *sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct pt_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) unsigned long pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (!kstack_valid(tp, fp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) sf = (struct sparc_stackf *) fp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) regs = (struct pt_regs *) (sf + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (kstack_is_trap_frame(tp, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (!(regs->tstate & TSTATE_PRIV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) pc = regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) fp = regs->u_regs[UREG_I6] + STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) pc = sf->callers_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) fp = (unsigned long)sf->fp + STACK_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) print_ip_sym(loglvl, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if ((pc + 8UL) == (unsigned long) &return_to_handler) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct ftrace_ret_stack *ret_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (ret_stack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) pc = ret_stack->ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) print_ip_sym(loglvl, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) graph++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) } while (++count < 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
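/* Step to the caller's register window: the saved %i6 plus STACK_BIAS is
 * the previous frame pointer, and a zero %i6 terminates the walk.
 */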
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) unsigned long fp = rw->ins[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (!fp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return (struct reg_window *) (fp + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
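/* Common fatal-trap path: print the oops banner and registers, dump a
 * caller backtrace and instruction dump for kernel-mode faults (or a user
 * instruction dump otherwise), then panic if panic_on_oops is set or kill
 * the offending task.
 */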
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) static int die_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /* Amuse the user. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) printk(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) " \\|/ ____ \\|/\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) " \"@'/ .. \\`@\"\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) " /_| \\__/ |_\\\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) " \\__U_/\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) __asm__ __volatile__("flushw");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct thread_info *tp = current_thread_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) struct reg_window *rw = (struct reg_window *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) (regs->u_regs[UREG_FP] + STACK_BIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /* Stop the back trace when we hit userland or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * find some badly aligned kernel stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) while (rw &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) count++ < 30 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) kstack_valid(tp, (unsigned long) rw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) printk("Caller[%016lx]: %pS\n", rw->ins[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) (void *) rw->ins[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) rw = kernel_stack_up(rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) instruction_dump((unsigned int *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) user_instruction_dump((unsigned int __user *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (panic_on_oops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) panic("Fatal exception");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (regs->tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) do_exit(SIGKILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) do_exit(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) EXPORT_SYMBOL(die_if_kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
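/* Match any VIS instruction (op = 2, op3 = 0x36) so that, on sun4v, the
 * emulator gets a chance before SIGILL is raised.
 */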
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) void do_illegal_instruction(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) unsigned long pc = regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) unsigned long tstate = regs->tstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) u32 insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (notify_die(DIE_TRAP, "illegal instruction", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 0, 0x10, SIGILL) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) die_if_kernel("Kernel illegal instruction", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (test_thread_flag(TIF_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) pc = (u32)pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (handle_popc(insn, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (handle_ldf_stq(insn, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) } else if (tlb_type == hypervisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (!vis_emul(regs, insn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct fpustate *f = FPUSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) /* On UltraSPARC T2 and later, FPU insns which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) * are not implemented in HW signal an illegal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) * instruction trap and do not set the FP Trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * Type in the %fsr to unimplemented_FPop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (do_mathemu(regs, f, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
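/* Unaligned-access trap (0x34).  Kernel accesses are patched up by
 * kernel_unaligned_trap(); user accesses either match the no-fault case
 * handled by is_no_fault_exception() or get SIGBUS/BUS_ADRALN with the
 * fault address.
 */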
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (notify_die(DIE_TRAP, "memory address unaligned", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 0, 0x34, SIGSEGV) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (is_no_fault_exception(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)sfar, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (notify_die(DIE_TRAP, "memory address unaligned", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 0, 0x34, SIGSEGV) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (is_no_fault_exception(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /* sun4v_mem_corrupt_detect_precise() - Handle precise exception on an ADI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * tag mismatch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) * ADI version tag mismatch on a load from memory always results in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * precise exception. Tag mismatch on a store to memory will result in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * precise exception if MCDPER or PMCDPER is set to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) void sun4v_mem_corrupt_detect_precise(struct pt_regs *regs, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) unsigned long context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (notify_die(DIE_TRAP, "memory corruption precise exception", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 0, 0x8, SIGSEGV) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (regs->tstate & TSTATE_PRIV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /* MCD exception could happen because the task was running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) * a system call with MCD enabled and passed a non-versioned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) * pointer or a pointer with a bad version tag to the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) * call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) const struct exception_table_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) entry = search_exception_tables(regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) /* Looks like a bad syscall parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) #ifdef DEBUG_EXCEPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) pr_emerg("Exception: PC<%016lx> faddr<UNKNOWN>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) pr_emerg("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) regs->tpc, entry->fixup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) regs->tpc = entry->fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) regs->tnpc = regs->tpc + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) pr_emerg("%s: ADDR[%016lx] CTX[%lx], going.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) __func__, addr, context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) die_if_kernel("MCD precise", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) force_sig_fault(SIGSEGV, SEGV_ADIPERR, (void __user *)addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
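/* Privileged-opcode trap (0x11) taken from user mode: deliver SIGILL with
 * ILL_PRVOPC at the trapping PC.  do_privact() below reuses the same path
 * for the privileged-action trap.
 */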
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) void do_privop(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (notify_die(DIE_TRAP, "privileged operation", regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 0, 0x11, SIGILL) == NOTIFY_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) force_sig_fault(SIGILL, ILL_PRVOPC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) (void __user *)regs->tpc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) void do_privact(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) do_privop(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /* Trap level 1 stuff or other traps we should never see... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) void do_cee(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) die_if_kernel("TL0: Cache Error Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) void do_div0_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) die_if_kernel("TL1: DIV0 Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) void do_fpieee_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) die_if_kernel("TL1: FPU IEEE Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) void do_fpother_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) die_if_kernel("TL1: FPU Other Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) void do_ill_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) die_if_kernel("TL1: Illegal Instruction Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) void do_irq_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) die_if_kernel("TL1: IRQ Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) void do_lddfmna_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) die_if_kernel("TL1: LDDF Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) void do_stdfmna_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) die_if_kernel("TL1: STDF Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) void do_paw(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) die_if_kernel("TL0: Phys Watchpoint Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) void do_paw_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) die_if_kernel("TL1: Phys Watchpoint Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) void do_vaw(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) die_if_kernel("TL0: Virt Watchpoint Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) void do_vaw_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) die_if_kernel("TL1: Virt Watchpoint Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) void do_tof_tl1(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) die_if_kernel("TL1: Tag Overflow Exception", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
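/* Software-trap helper for 32-bit compat code that wants the old v8 %psr:
 * synthesise it from TSTATE, return it in %o0 and step past the trap
 * instruction.
 */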
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) void do_getpsr(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) regs->tpc = regs->tnpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) regs->tnpc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
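/* Per-cpu trap state.  trap_block[] is indexed by the hard cpu id (see
 * init_cur_cpu_trap() below) and its layout is pinned to the TRAP_PER_CPU_*
 * offset constants by the BUILD_BUG_ON() checks in trap_init().
 */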
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) u64 cpu_mondo_counter[NR_CPUS] = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) struct trap_per_cpu trap_block[NR_CPUS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) EXPORT_SYMBOL(trap_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) /* This can get invoked before sched_init() so play it super safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) * and use hard_smp_processor_id().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) void notrace init_cur_cpu_trap(struct thread_info *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) int cpu = hard_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) struct trap_per_cpu *p = &trap_block[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) p->thread = t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) p->pgd_paddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
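/* Apparently left over from the old link-time style of offset checking
 * (the functions are never defined, so calling one would fail the link);
 * the BUILD_BUG_ON() checks in trap_init() below now catch mismatches at
 * compile time.
 */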
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) extern void thread_info_offsets_are_bolixed_dave(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) extern void trap_per_cpu_offsets_are_bolixed_dave(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) extern void tsb_config_offsets_are_bolixed_dave(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /* Only invoked on boot processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) void __init trap_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) /* Compile time sanity check. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) TI_FLAGS != offsetof(struct thread_info, flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) TI_CPU != offsetof(struct thread_info, cpu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) TI_KSP != offsetof(struct thread_info, ksp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) TI_FAULT_ADDR != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) fault_address) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) TI_KREGS != offsetof(struct thread_info, kregs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) TI_UTRAPS != offsetof(struct thread_info, utraps) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) TI_REG_WINDOW != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) reg_window) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) TI_RWIN_SPTRS != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) rwbuf_stkptrs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) TI_GSR != offsetof(struct thread_info, gsr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) TI_XFSR != offsetof(struct thread_info, xfsr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) TI_PRE_COUNT != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) preempt_count) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) TI_CURRENT_DS != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) current_ds) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) TI_KUNA_REGS != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) kern_una_regs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) TI_KUNA_INSN != offsetof(struct thread_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) kern_una_insn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) TI_FPREGS != offsetof(struct thread_info, fpregs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) (TI_FPREGS & (64 - 1)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) thread) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) (TRAP_PER_CPU_PGD_PADDR !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) offsetof(struct trap_per_cpu, pgd_paddr)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) (TRAP_PER_CPU_CPU_MONDO_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) (TRAP_PER_CPU_DEV_MONDO_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) (TRAP_PER_CPU_RESUM_MONDO_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) (TRAP_PER_CPU_RESUM_KBUF_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) (TRAP_PER_CPU_FAULT_INFO !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) offsetof(struct trap_per_cpu, fault_info)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) (TRAP_PER_CPU_CPU_LIST_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) offsetof(struct trap_per_cpu, cpu_list_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) (TRAP_PER_CPU_TSB_HUGE !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) offsetof(struct trap_per_cpu, tsb_huge)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) (TRAP_PER_CPU_TSB_HUGE_TEMP !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) (TRAP_PER_CPU_CPU_MONDO_QMASK !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) (TRAP_PER_CPU_DEV_MONDO_QMASK !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) (TRAP_PER_CPU_RESUM_QMASK !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) offsetof(struct trap_per_cpu, resum_qmask)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) (TRAP_PER_CPU_NONRESUM_QMASK !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) offsetof(struct trap_per_cpu, nonresum_qmask)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) (TRAP_PER_CPU_PER_CPU_BASE !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) offsetof(struct trap_per_cpu, __per_cpu_base)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) BUILD_BUG_ON((TSB_CONFIG_TSB !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) offsetof(struct tsb_config, tsb)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) (TSB_CONFIG_RSS_LIMIT !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) offsetof(struct tsb_config, tsb_rss_limit)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) (TSB_CONFIG_NENTRIES !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) offsetof(struct tsb_config, tsb_nentries)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) (TSB_CONFIG_REG_VAL !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) offsetof(struct tsb_config, tsb_reg_val)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) (TSB_CONFIG_MAP_VADDR !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) offsetof(struct tsb_config, tsb_map_vaddr)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) (TSB_CONFIG_MAP_PTE !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) offsetof(struct tsb_config, tsb_map_pte)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /* Attach to the address space of init_task. On SMP we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * do this in smp.c:smp_callin for other cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) mmgrab(&init_mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) current->active_mm = &init_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }