// SPDX-License-Identifier: GPL-2.0-only
/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *	Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *	added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
 *	Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *	errors, completed code for logging of corrected & uncorrected
 *	machine check errors, and updated for conformance with Nov. 2000
 *	revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
 *	Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *	set SAL default return values, changed error record structure to
 *	linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
 *	GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *	Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *	smp_call_function() must not be called from interrupt context
 *	(can deadlock on tasklist_lock).
 *	Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *	Avoid deadlock when using printk() for MCA and INIT records.
 *	Delete all record printing code, moved to salinfo_decode in user
 *	space.  Mark variables and functions static where possible.
 *	Delete dead variables and functions.  Reorder to remove the need
 *	for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *	Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *	state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *	Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <rja@sgi.com>
 *	Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/delay.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"
#include "irq.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

#define NOTIFY_INIT(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)

#define NOTIFY_MCA(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)
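
/*
 * Usage sketch: these wrappers let a registered die-notifier chain observe
 * (and optionally hijack) MCA/INIT processing, e.g.
 *
 *	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 *
 * as done in ia64_mca_rendez_int_handler() below.  With spin == 1, a
 * NOTIFY_STOP result parks this cpu in ia64_mca_spin() instead of letting
 * the handler continue.
 */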

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void ia64_os_init_dispatch_monarch(void);
extern void ia64_os_init_dispatch_slave(void);

static int monarch_cpu = -1;

static ia64_mc_info_t ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL	(15*60*HZ)	/* 15 minutes */
#define MIN_CPE_POLL_INTERVAL	(2*60*HZ)	/* 2 minutes */
#define CMC_POLL_INTERVAL	(1*60*HZ)	/* 1 minute */
#define CPE_HISTORY_LENGTH	5
#define CMC_HISTORY_LENGTH	5
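
/*
 * The *_HISTORY_LENGTH values are the number of corrected-error interrupts
 * within roughly one second (HZ jiffies) after which the kernel gives up on
 * interrupt-driven delivery and falls back to the polling timers above; see
 * ia64_mca_cpe_int_handler() below for the CPE case.
 */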

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play with timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited & delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)

#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)		\
	oops_in_progress = 1;				\
	if (loglevel_save < 0)				\
		loglevel_save = __console_loglevel;	\
	__console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)		\
	if (loglevel_save >= 0) {			\
		__console_loglevel = loglevel_save;	\
		loglevel_save = -1;			\
	}						\
	mlogbuf_finished = 0;				\
	oops_in_progress = 0;
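
/*
 * Note: BREAK_LOGLEVEL/RESTORE_LOGLEVEL expand to several bare statements
 * (no do { } while (0) wrapper), so they must not be used as the body of a
 * brace-less if/else; they are only used as plain statements at function
 * scope, as with BREAK_LOGLEVEL() in ia64_mlogbuf_finish() below.
 */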

/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
	va_list args;
	int printed_len;
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;

	va_start(args, fmt);
	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
	va_end(args);

	/* Copy the output into mlogbuf */
	if (oops_in_progress) {
		/* mlogbuf was abandoned, use printk directly instead. */
		printk("%s", temp_buf);
	} else {
		spin_lock(&mlogbuf_wlock);
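		/*
		 * mlogbuf is a ring buffer that keeps one slot free:
		 * mlogbuf_start == mlogbuf_end means empty, while
		 * (mlogbuf_end + 1) % MLOGBUF_SIZE == mlogbuf_start means
		 * full, in which case the tail of the message is dropped.
		 */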
		for (p = temp_buf; *p; p++) {
			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
			if (next != mlogbuf_start) {
				mlogbuf[mlogbuf_end] = *p;
				mlogbuf_end = next;
			} else {
				/* buffer full */
				break;
			}
		}
		mlogbuf[mlogbuf_end] = '\0';
		spin_unlock(&mlogbuf_wlock);
	}
}
EXPORT_SYMBOL(ia64_mca_printk);

/*
 * Print buffered messages.
 * NOTE: call this after returning to normal context (e.g. from salinfod).
 */
void ia64_mlogbuf_dump(void)
{
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;
	unsigned long index;
	unsigned long flags;
	unsigned int printed_len;

	/* Get output from mlogbuf */
	while (mlogbuf_start != mlogbuf_end) {
		temp_buf[0] = '\0';
		p = temp_buf;
		printed_len = 0;

		spin_lock_irqsave(&mlogbuf_rlock, flags);

		index = mlogbuf_start;
		while (index != mlogbuf_end) {
			*p = mlogbuf[index];
			index = (index + 1) % MLOGBUF_SIZE;
			if (!*p)
				break;
			p++;
			if (++printed_len >= MLOGBUF_MSGMAX - 1)
				break;
		}
		*p = '\0';
		if (temp_buf[0])
			printk("%s", temp_buf);
		mlogbuf_start = index;

		mlogbuf_timestamp = 0;
		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
	}
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down, or if messages must be flushed to
 * the console immediately (e.g. recovery failed, a crash dump is about to
 * be invoked, a long-wait rendezvous, etc.).
 * NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
	BREAK_LOGLEVEL(console_loglevel);

	spin_lock_init(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
		"MCA/INIT might be dodgy or fail.\n");

	if (!wait)
		return;

	/* wait for console */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);

	mlogbuf_finished = 1;
}

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
	if (mlogbuf_finished)
		return;

	if (mlogbuf_timestamp &&
	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
			"and the system seems to be messed up.\n");
		ia64_mlogbuf_finish(0);
		return;
	}

	if (!spin_trylock(&mlogbuf_rlock)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
			"Generated messages other than stack dump will be "
			"buffered to mlogbuf and will be printed later.\n");
		printk(KERN_ERR "INIT: If messages are not printed after "
			"this INIT, wait 30 seconds and assert INIT again.\n");
		if (!mlogbuf_timestamp)
			mlogbuf_timestamp = jiffies;
		return;
	}
	spin_unlock(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
	if (monarch_cpu == smp_processor_id())
		ia64_mlogbuf_finish(0);
	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}

/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_LOCK_INIT(it)	spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)	spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)	spin_unlock_irqrestore(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_NEXT_INDEX(it)	ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)	(1 - ia64_state_log[it].isl_index)
#define IA64_LOG_INDEX_INC(it)						\
	{ ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	  ia64_state_log[it].isl_count++; }
#define IA64_LOG_INDEX_DEC(it)						\
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it) ((void *)(ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it) ((void *)(ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)	ia64_state_log[it].isl_count
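
/*
 * Index arithmetic sketch: isl_index always names the "next" buffer and
 * 1 - isl_index the "current" one.  Starting from isl_index == 0, a record
 * is fetched into buffer 0 via IA64_LOG_NEXT_BUFFER(); IA64_LOG_INDEX_INC()
 * then flips isl_index to 1, so the freshly filled buffer 0 becomes
 * IA64_LOG_CURR_BUFFER() while buffer 1 stands by for a nested event.
 */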

static inline void ia64_log_allocate(int it, u64 size)
{
	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
		panic("%s: Failed to allocate %llu bytes\n", __func__, size);

	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
}

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :	info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* memblock_alloc() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	ia64_log_allocate(sal_info_type, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 * Inputs   :	info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *		irq_safe    whether you can use printk at this point
 * Outputs  :	size        (total record length)
 *		*buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t *log_buffer;
	u64 total_len = 0;
	unsigned long s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 * ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 * Inputs   :	sal_info_type   (Type of error record MCA/CMC/CPE)
 *		FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *	See if the MCA surfaced in an instruction range
 *	that has been tagged as recoverable.
 *
 * Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *	1 on Success (in the table) / 0 on Failure (not in the table)
 */
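/*
 * The table entries hold self-relative offsets rather than absolute
 * addresses: each field stores the distance from its own location to the
 * tagged instruction, so adding the field's address back recovers the
 * absolute address.  Hypothetical example: with &curr->start_addr == 0xa000
 * and curr->start_addr == 0x100, the range starts at 0xa100.  This keeps
 * the table correct wherever the kernel image is placed.
 */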
int
search_mca_table (const struct mca_table_entry *first,
		  const struct mca_table_entry *last,
		  unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table - 1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long cpe_history[CPE_HISTORY_LENGTH];
	static int index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run with interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

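		/*
		 * Count how many CPEs, including this one, landed within
		 * the last second (HZ jiffies); cpe_history[] holds the
		 * timestamps of the previous interrupts.  Reaching
		 * CPE_HISTORY_LENGTH within that window means the error
		 * rate is too high for interrupt mode.
		 */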
		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	local_irq_disable();

	return IRQ_HANDLED;
}

/*
 * ia64_mca_register_cpev
 *
 *	Register the corrected platform error vector with SAL.
 *
 * Inputs
 *	cpev	Corrected Platform Error Vector number
 *
 * Outputs
 *	None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}

/*
 * ia64_mca_cmc_vector_setup
 *
 *	Set up the corrected machine check vector register in the processor.
 *	(The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	None
 *
 * Outputs
 *	None
 */
void
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *	Mask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *	Unmask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy (unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context)
 * to disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context)
 * to enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 * Inputs  :	cpuid
 * Outputs :	None
 */
static void
ia64_mca_wakeup(int cpu)
{
	ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the slave cpus which have rendez'ed previously.
 *
 * Inputs  :	None
 * Outputs :	None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * ia64_mca_rendez_interrupt_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * This is handler used to put slave processors into spinloop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * while the monarch processor does the mca handling and later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * wake each slave up once the monarch is done. The state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * in SAL. The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * the cpu has come out of OS rendezvous.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * Inputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Outputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct ia64_mca_notify_die nd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) { .sos = NULL, .monarch_cpu = &monarch_cpu };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /* Mask all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Register with the SAL monarch that the slave has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * reached SAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ia64_sal_mc_rendez();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* Wait for the monarch cpu to exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) while (monarch_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) cpu_relax(); /* spin until monarch leaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Enable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
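
/*
 * For illustration, the slave checkin sequence implemented above is
 * roughly:
 *
 *   NOTDONE --(rendezvous interrupt received)--> DONE
 *       slave calls ia64_sal_mc_rendez() and spins inside SAL
 *       the monarch's wakeup IPI returns the slave to the OS
 *       slave spins on monarch_cpu until the monarch sets it to -1
 *   DONE --(monarch has left the handler)--> NOTDONE
 *
 * so imi_rendez_checkin[] is both the monarch's view of slave progress
 * and the gate that ia64_mca_wakeup_all() uses to pick cpus to wake.
 */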
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * ia64_mca_wakeup_int_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *
 * The interrupt handler for processing the inter-cpu interrupt to the
 * slave cpu that was spinning in the rendez loop.
 * Since this spinning is done with interrupts disabled, polling on the
 * wakeup-interrupt bit in the IRR, there is nothing useful to be done
 * in the handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Inputs : wakeup_irq (Wakeup-interrupt bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * arg (Interrupt handler specific argument)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * Outputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)(void *, struct ia64_sal_os_state *) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (ia64_mca_ucmc_extension)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ia64_mca_ucmc_extension = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ia64_unreg_MCA_extension(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (ia64_mca_ucmc_extension)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ia64_mca_ucmc_extension = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) EXPORT_SYMBOL(ia64_reg_MCA_extension);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) EXPORT_SYMBOL(ia64_unreg_MCA_extension);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) u64 fslot, tslot, nat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *tr = *fr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) fslot = ((unsigned long)fr >> 3) & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tslot = ((unsigned long)tr >> 3) & 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) *tnat &= ~(1UL << tslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) nat = (fnat >> fslot) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) *tnat |= (nat << tslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
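
/*
 * Worked example (addresses illustrative): NAT bits are indexed by bits
 * 8:3 of the spill address, so for fr == 0x...1008 the source slot is
 * (0x1008 >> 3) & 63 == 1, and the NAT bit travels from bit 1 of fnat
 * to bit ((unsigned long)tr >> 3) & 63 of *tnat.  This is the same slot
 * numbering that ar.unat uses for st8.spill/ld8.fill.
 */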
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted; it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ia64_mca_modify_comm(const struct task_struct *previous_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) char *p, comm[sizeof(current->comm)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (previous_current->pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) snprintf(comm, sizeof(comm), "%s %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) current->comm, previous_current->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if ((p = strchr(previous_current->comm, ' ')))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) l = p - previous_current->comm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) l = strlen(previous_current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) snprintf(comm, sizeof(comm), "%s %*s %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) current->comm, l, previous_current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) task_thread_info(previous_current)->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) memcpy(current->comm, comm, sizeof(current->comm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
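
/*
 * For illustration (values hypothetical): with current->comm "MCA" and
 * an interrupted pid of 1234 the result is "MCA 1234"; if pid 0 was
 * interrupted on cpu 3 with previous comm "swapper", the result is of
 * the form "MCA swapper 3", truncated to sizeof(current->comm).
 */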
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) unsigned long *nat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) const pal_min_state_area_t *ms = sos->pal_min_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) const u64 *bank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * pmsa_{xip,xpsr,xfs}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ia64_psr(regs)->ic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) regs->cr_iip = ms->pmsa_iip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) regs->cr_ipsr = ms->pmsa_ipsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) regs->cr_ifs = ms->pmsa_ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) regs->cr_iip = ms->pmsa_xip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) regs->cr_ipsr = ms->pmsa_xpsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) regs->cr_ifs = ms->pmsa_xfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) sos->iip = ms->pmsa_iip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) sos->ipsr = ms->pmsa_ipsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sos->ifs = ms->pmsa_ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) regs->pr = ms->pmsa_pr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) regs->b0 = ms->pmsa_br0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) regs->ar_rsc = ms->pmsa_rsc;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (ia64_psr(regs)->bn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) bank = ms->pmsa_bank1_gr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
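
/*
 * Note for the bank selection above: psr.bn at the time of the event
 * says which register bank held the live r16-r31; PAL minstate saves
 * both banks, so the inactive one is simply ignored here.
 */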
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack; the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state; it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) static struct task_struct *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) ia64_mca_modify_original_stack(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) const struct switch_stack *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct ia64_sal_os_state *sos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) const char *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ia64_va va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) const pal_min_state_area_t *ms = sos->pal_min_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct task_struct *previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct pt_regs *old_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct switch_stack *old_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) unsigned size = sizeof(struct pt_regs) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) sizeof(struct switch_stack) + 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) unsigned long *old_bspstore, *old_bsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) unsigned long *new_bspstore, *new_bsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned long old_unat, old_rnat, new_rnat, nat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u64 slots, loadrs = regs->loadrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) u64 ar_bspstore = regs->ar_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) previous_current = curr_task(cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ia64_set_curr_task(cpu, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if ((p = strchr(current->comm, ' ')))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) *p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* Best effort attempt to cope with MCA/INIT delivered while in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * physical mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) regs->cr_ipsr = ms->pmsa_ipsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (ia64_psr(regs)->dt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) va.l = r12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (va.f.reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) va.f.reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) r12 = va.l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) va.l = r13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (va.f.reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) va.f.reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) r13 = va.l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (ia64_psr(regs)->rt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) va.l = ar_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (va.f.reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) va.f.reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ar_bspstore = va.l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) va.l = ar_bsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (va.f.reg == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) va.f.reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ar_bsp = va.l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * have been copied to the old stack, the old stack may fail the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * validation tests below. So ia64_old_stack() must restore the dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * registers from the new stack. The old and new bspstore probably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * have different alignments, so loadrs calculated on the old bsp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * cannot be used to restore from the new bsp. Calculate a suitable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * loadrs for the new stack and save it in the new pt_regs, where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * ia64_old_stack() can get it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) old_bspstore = (unsigned long *)ar_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) old_bsp = (unsigned long *)ar_bsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) slots = ia64_rse_num_regs(old_bspstore, old_bsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
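	/*
	 * Worked example (numbers illustrative): for 62 dirty registers
	 * with no intervening RNAT slot, new_bsp - new_bspstore == 62,
	 * giving a dirty partition of 62*8 == 496 bytes; 496 << 16 places
	 * that byte count in the ar.rsc.loadrs field position used when
	 * ia64_old_stack() reloads the dirty registers.
	 */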
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Verify the previous stack state before we change it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) msg = "occurred in user space";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* previous_current is guaranteed to be valid when the task was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * in user space, so ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ia64_mca_modify_comm(previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (r13 != sos->prev_IA64_KR_CURRENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) msg = "inconsistent previous current and r13";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (!mca_recover_range(ms->pmsa_iip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if ((r12 - r13) >= KERNEL_STACK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) msg = "inconsistent r12 and r13";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) msg = "inconsistent ar.bspstore and r13";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) va.p = old_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (va.f.reg < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) msg = "old_bspstore is in the wrong region";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) msg = "inconsistent ar.bsp and r13";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (ar_bspstore + size > r12) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) msg = "no room for blocked state";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto no_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ia64_mca_modify_comm(previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* Make the original task look blocked. First stack a struct pt_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * describing the state at the time of interrupt. mca_asm.S built a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * partial pt_regs, copy it and fill in the blanks using minstate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) p = (char *)r12 - sizeof(*regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) old_regs = (struct pt_regs *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) memcpy(old_regs, regs, sizeof(*regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) old_regs->loadrs = loadrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) old_unat = old_regs->ar_unat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) finish_pt_regs(old_regs, sos, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Next stack a struct switch_stack. mca_asm.S built a partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * switch_stack, copy it and fill in the blanks using pt_regs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * minstate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * ar.pfs is set to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * unwind.c::unw_unwind() does special processing for interrupt frames.
 * It checks whether the PRED_NON_SYSCALL predicate is set; if the predicate
 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * that this is documented, of course. Set PRED_NON_SYSCALL in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * switch_stack on the original stack so it will unwind correctly when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * unwind.c reads pt_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * thread.ksp is updated to point to the synthesized switch_stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) p -= sizeof(struct switch_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) old_sw = (struct switch_stack *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) memcpy(old_sw, sw, sizeof(*sw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) old_sw->caller_unat = old_unat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) old_sw->ar_fpsr = old_regs->ar_fpsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) old_sw->b0 = (u64)ia64_leave_kernel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) old_sw->b1 = ms->pmsa_br1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) old_sw->ar_pfs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) old_sw->ar_unat = old_unat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) previous_current->thread.ksp = (u64)p - 16;
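	/*
	 * Resulting layout on the original stack (sketch, higher
	 * addresses at the top):
	 *
	 *	r12 at the event -> +------------------+
	 *			    |     pt_regs      |
	 *			    +------------------+
	 *			    |   switch_stack   |
	 *		       p -> +------------------+
	 *			    | 16 bytes scratch |
	 *	    thread.ksp ->   +------------------+
	 */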
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Finally copy the original stack's registers back to its RBS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Registers from ar.bspstore through ar.bsp at the time of the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * are in the current RBS, copy them back to the original stack. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * copy must be done register by register because the original bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * and the current one have different alignments, so the saved RNAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * data occurs at different places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) *
 * mca_asm does a cover, so the old_bsp already includes all registers at
 * the time of MCA/INIT.  It also does a flushrs, so all registers before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * this function have been written to backing store on the MCA/INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) old_rnat = regs->ar_rnat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) while (slots--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (ia64_rse_is_rnat_slot(new_bspstore)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) new_rnat = ia64_get_rnat(new_bspstore++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (ia64_rse_is_rnat_slot(old_bspstore)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) *old_bspstore++ = old_rnat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) old_rnat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) *old_bspstore++ = *new_bspstore++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
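	/*
	 * For illustration: an RNAT collection occupies every 64th slot,
	 * i.e. ia64_rse_is_rnat_slot() is true when bits 8:3 of the
	 * address are all ones (e.g. 0x...1f8).  Because the old and new
	 * bspstore start at different offsets within that 64-slot window,
	 * the collections fall on different registers, which is why the
	 * NAT bit is re-derived for every slot above.
	 */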
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) old_sw->ar_bspstore = (unsigned long)old_bspstore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) old_sw->ar_rnat = old_rnat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) sos->prev_task = previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) no_mod:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) smp_processor_id(), type, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) old_unat = regs->ar_unat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) finish_pt_regs(regs, sos, &old_unat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* The monarch/slave interaction is based on monarch_cpu and requires that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * slaves have entered rendezvous before the monarch leaves. If any cpu has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * not entered rendezvous yet then wait a bit. The assumption is that any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * slave that has not rendezvoused after a reasonable time is never going to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * so. In this context, slave includes cpus that respond to the MCA rendezvous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * interrupt, as well as cpus that receive the INIT slave event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ia64_wait_for_slaves(int monarch, const char *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
	int c, i, wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * wait 5 seconds total for slaves (arbitrary)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) for (i = 0; i < 5000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) for_each_online_cpu(c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (c == monarch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (ia64_mc_info.imi_rendez_checkin[c]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) udelay(1000); /* short wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto all_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /*
 * Some slave(s) may be dead.  Print buffered messages immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ia64_mlogbuf_finish(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) for_each_online_cpu(c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (c == monarch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) mprintk(" %d", c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) mprintk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) all_in:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
/* mca_insert_tr
 *
 *	Switch the region id (rid) around the TR reload when needed.
 *	iord: 1: itr, 2: dtr
 *
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void mca_insert_tr(u64 iord)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) u64 old_rr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct ia64_tr_entry *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) unsigned long psr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!ia64_idtrs[cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) psr = ia64_clear_ic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (p->pte & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) old_rr = ia64_get_rr(p->ifa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (old_rr != p->rr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) ia64_set_rr(p->ifa, p->rr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ia64_srlz_d();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) ia64_ptr(iord, p->ifa, p->itir >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ia64_srlz_i();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (iord & 0x1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ia64_srlz_i();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (iord & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ia64_srlz_i();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (old_rr != p->rr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ia64_set_rr(p->ifa, old_rr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ia64_srlz_d();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ia64_set_psr(psr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
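
/*
 * Note on the rr dance above: the purge and insert must be issued under
 * the region register value that the saved translation was created
 * with, so the saved p->rr is installed first and the previous value is
 * restored afterwards, with srlz.d/srlz.i serializing each step.
 */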
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) * ia64_mca_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) *
 * This is the uncorrectable machine check handler, called from the
 * OS_MCA dispatch code which is in turn called from SAL_CHECK().
 * This is the place where the core of OS MCA handling is done.
 * Right now the logs are extracted and displayed in a well-defined
 * format.  This handler code is supposed to be run only on the
 * monarch processor.  Once the monarch is done with MCA handling,
 * further MCA logging is enabled by clearing logs.
 * The monarch also has the duty of sending wakeup-IPIs to pull the
 * slave processors out of the rendezvous spinloop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * If multiple processors call into OS_MCA, the first will become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * the monarch. Subsequent cpus will be recorded in the mca_cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * bitmask. After the first monarch has processed its MCA, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * will wake up the next cpu in the mca_cpu bitmask and then go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * into the rendezvous loop. When all processors have serviced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * their MCA, the last monarch frees up the rest of the processors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct ia64_sal_os_state *sos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) int recover, cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct task_struct *previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct ia64_mca_notify_die nd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static atomic_t mca_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static cpumask_t mca_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (atomic_add_return(1, &mca_count) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) monarch_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) sos->monarch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) cpumask_set_cpu(cpu, &mca_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) sos->monarch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (sos->monarch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ia64_wait_for_slaves(cpu, "MCA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* Wakeup all the processors which are spinning in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * rendezvous loop. They will leave SAL, then spin in the OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * with interrupts disabled until this monarch cpu leaves the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * MCA handler. That gets control back to the OS so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * backtrace the other cpus, backtrace when spinning in SAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * does not work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) ia64_mca_wakeup_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) while (cpumask_test_cpu(cpu, &mca_cpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) cpu_relax(); /* spin until monarch wakes us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* Get the MCA error record and log it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* MCA error recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) recover = (ia64_mca_ucmc_extension
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) && ia64_mca_ucmc_extension(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) sos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (recover) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) rh->severity = sal_log_severity_corrected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) sos->os_status = IA64_MCA_CORRECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /* Dump buffered message to console */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) ia64_mlogbuf_finish(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (__this_cpu_read(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /* Reload dynamic itrs */
		mca_insert_tr(0x2); /* Reload dynamic dtrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (atomic_dec_return(&mca_count) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* wake up the next monarch cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * and put this cpu in the rendez loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) for_each_online_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (cpumask_test_cpu(i, &mca_cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) monarch_cpu = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) while (monarch_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) cpu_relax(); /* spin until last cpu leaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ia64_set_curr_task(cpu, previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ia64_mc_info.imi_rendez_checkin[cpu]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) ia64_set_curr_task(cpu, previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) monarch_cpu = -1; /* This frees the slaves and previous monarchs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
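
/*
 * For illustration, with two concurrent MCAs: cpu A arrives first
 * (mca_count 0->1) and becomes monarch; cpu B arrives second and is
 * recorded in mca_cpu.  When A finishes it sees mca_count > 0, clears
 * B's bit (making B the new monarch) and spins as a slave until B, the
 * last monarch, sets monarch_cpu = -1 and frees everyone.
 */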
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * ia64_mca_cmc_int_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
 * This is the corrected machine check interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * Right now the logs are extracted and displayed in a well-defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Inputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * client data arg ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Outputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static unsigned long cmc_history[CMC_HISTORY_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static DEFINE_SPINLOCK(cmc_history_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) __func__, cmc_irq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /* SAL spec states this should run w/ interrupts enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) spin_lock(&cmc_history_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (!cmc_polling_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i, count = 1; /* we know 1 happened now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) unsigned long now = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (now - cmc_history[i] <= HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (count >= CMC_HISTORY_LENGTH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) cmc_polling_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) spin_unlock(&cmc_history_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) /* If we're being hit with CMC interrupts, we won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * ever execute the schedule_work() below. Need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * disable CMC interrupts on this processor now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ia64_mca_cmc_vector_disable(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) schedule_work(&cmc_disable_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * Corrected errors will still be corrected, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * make sure there's a log somewhere that indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * something is generating more than we can handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /* lock already released, get out now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) cmc_history[index++] = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (index == CMC_HISTORY_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) spin_unlock(&cmc_history_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* Get the CMC error record and log it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
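
/*
 * Worked example for the threshold above: cmc_history[] holds the
 * jiffies timestamps of the last CMC_HISTORY_LENGTH interrupts.  If
 * this interrupt plus the history show CMC_HISTORY_LENGTH events within
 * the last HZ jiffies (one second), the rate is treated as a storm and
 * the handler switches itself to the polling path.
 */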
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) * ia64_mca_cmc_int_caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * Triggered by sw interrupt from CMC polling routine. Calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * real interrupt handler and either triggers a sw interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * on the next cpu or does cleanup at the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * Inputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * client data arg ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * Outputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static int start_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) unsigned int cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) cpuid = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* If first cpu, update count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (start_count == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) ia64_mca_cmc_int_handler(cmc_irq, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) cpuid = cpumask_next(cpuid+1, cpu_online_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (cpuid < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) /* If no log record, switch out of polling mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) schedule_work(&cmc_enable_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) cmc_polling_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) start_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
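
/*
 * For illustration, one poll pass: the timer IPIs the first online cpu,
 * whose handler above logs locally and IPIs the next online cpu, and so
 * on; the last cpu in the mask compares the SAL log count against
 * start_count to decide between rearming the poll timer and returning
 * to interrupt mode.
 */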
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * ia64_mca_cmc_poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * Poll for Corrected Machine Checks (CMCs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * Inputs : dummy(unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * Outputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ia64_mca_cmc_poll (struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* Trigger a CMC interrupt cascade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) IA64_IPI_DM_INT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * ia64_mca_cpe_int_caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * Triggered by sw interrupt from CPE polling routine. Calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * real interrupt handler and either triggers a sw interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * on the next cpu or does cleanup at the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * Inputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * client data arg ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * Outputs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static int start_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static int poll_time = MIN_CPE_POLL_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) unsigned int cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) cpuid = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* If first cpu, update count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (start_count == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) ia64_mca_cpe_int_handler(cpe_irq, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
	/*
	 * cpumask_next() already returns the first online CPU after
	 * @cpuid, so passing cpuid+1 here would skip an online CPU in
	 * the cascade.
	 */
	cpuid = cpumask_next(cpuid, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) } else {
		/*
		 * If a log was recorded, increase our polling frequency;
		 * otherwise, back off or return to interrupt mode.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) } else if (cpe_vector < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) poll_time = MIN_CPE_POLL_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) cpe_poll_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (cpe_poll_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) mod_timer(&cpe_poll_timer, jiffies + poll_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) start_count = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
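
/*
 * Editor's sketch (illustrative, not from the original source) of the
 * adaptive interval logic above, assuming MIN_CPE_POLL_INTERVAL <
 * MAX_CPE_POLL_INTERVAL (both in jiffies). A hypothetical helper with
 * the same arithmetic:
 *
 *	static int cpe_next_interval(int cur, bool new_records, bool poll_only)
 *	{
 *		if (new_records)		// new CPEs logged: poll twice as often
 *			return max(MIN_CPE_POLL_INTERVAL, cur / 2);
 *		if (poll_only)			// no CPEI vector: back off
 *			return min(MAX_CPE_POLL_INTERVAL, cur * 2);
 *		return MIN_CPE_POLL_INTERVAL;	// about to hand back to CPEI
 *	}
 */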
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * ia64_mca_cpe_poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * Poll for Corrected Platform Errors (CPEs), trigger interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * on first cpu, from there it will trickle through all the cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * Inputs : dummy(unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * Outputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) ia64_mca_cpe_poll (struct timer_list *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /* Trigger a CPE interrupt cascade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) IA64_IPI_DM_INT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) int c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct task_struct *g, *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (val != DIE_INIT_MONARCH_PROCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) #ifdef CONFIG_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (atomic_read(&kdump_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
	/*
	 * FIXME: mlogbuf will brim over with INIT stack dumps.
	 * To enable show_stack from INIT, we use oops_in_progress, which
	 * should only be set in a real oops; this may leave things in an
	 * inconsistent state after INIT returns.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) BREAK_LOGLEVEL(console_loglevel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) ia64_mlogbuf_dump_from_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) printk(KERN_ERR "Processes interrupted by INIT -");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) for_each_online_cpu(c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct ia64_sal_os_state *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) g = s->prev_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (g) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (g->pid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) printk(" %d", g->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) printk("\n\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (read_trylock(&tasklist_lock)) {
		for_each_process_thread(g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL, KERN_DEFAULT);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) read_unlock(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* FIXME: This will not restore zapped printk locks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) RESTORE_LOGLEVEL(console_loglevel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * C portion of the OS INIT handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) * Called from ia64_os_init_dispatch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * this event. This code is used for both monarch and slave INIT events, see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * sos->monarch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * All INIT events switch to the INIT stack and change the previous process to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * blocked status. If one of the INIT events is the monarch then we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * probably processing the nmi button/command. Use the monarch cpu to dump all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * the processes. The slave INIT events all spin until the monarch cpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * returns. We can also get INIT slave events for MCA, in which case the MCA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * process is the monarch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct ia64_sal_os_state *sos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static atomic_t slaves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static atomic_t monarchs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) struct task_struct *previous_current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct ia64_mca_notify_die nd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) { .sos = sos, .monarch_cpu = &monarch_cpu };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) sos->proc_state_param, cpu, sos->monarch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) sos->os_status = IA64_INIT_RESUME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves. The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006; that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) __func__, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) atomic_dec(&slaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) sos->monarch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs. Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006; that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) __func__, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) atomic_dec(&monarchs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) sos->monarch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (!sos->monarch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) #ifdef CONFIG_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) while (monarch_cpu == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) cpu_relax(); /* spin until monarch enters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #ifdef CONFIG_KEXEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) while (monarch_cpu != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) cpu_relax(); /* spin until monarch leaves */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) mprintk("Slave on cpu %d returning to normal service.\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) ia64_set_curr_task(cpu, previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) atomic_dec(&slaves);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) monarch_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
	/*
	 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT
	 * can be generated via the BMC's command-line interface, but since the
	 * console is on the same serial line, the user will need some time to
	 * switch out of the BMC before the dump begins.
	 */
	mprintk("Delaying for 5 seconds...\n");
	mdelay(5 * 1000);	/* busy-wait; sleeping is not allowed here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) ia64_wait_for_slaves(cpu, "INIT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * to default_monarch_init_process() above and just print all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) atomic_dec(&monarchs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ia64_set_curr_task(cpu, previous_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) monarch_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
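
/*
 * Editor's note: the monarch/slave handshake above synchronizes only
 * through the global monarch_cpu. Rough timeline for one monarch M and
 * slaves S (kdump checks omitted):
 *
 *	S: check in, then spin while (monarch_cpu == -1)
 *	M: monarch_cpu = cpu; delay ~5s; ia64_wait_for_slaves()
 *	M: dump tasks via the DIE_INIT_MONARCH_PROCESS notifier chain
 *	S: spin while (monarch_cpu != -1)
 *	M: monarch_cpu = -1, after which the slaves resume normal service
 */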
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ia64_mca_disable_cpe_polling(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) cpe_poll_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
/* Minimal format of the MCA/INIT stacks. The pseudo processes that run on
 * these stacks can never sleep, cannot return from the kernel to user
 * space and do not appear in a normal ps listing, so there is no need to
 * format most of the fields.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) format_mca_init_stack(void *mca_data, unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) const char *type, int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct thread_info *ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) memset(p, 0, KERNEL_STACK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) ti = task_thread_info(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ti->flags = _TIF_MCA_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) ti->preempt_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) ti->task = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ti->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) p->stack = ti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) p->state = TASK_UNINTERRUPTIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) cpumask_set_cpu(cpu, &p->cpus_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) INIT_LIST_HEAD(&p->tasks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) p->parent = p->real_parent = p->group_leader = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) INIT_LIST_HEAD(&p->children);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) INIT_LIST_HEAD(&p->sibling);
	strscpy(p->comm, type, sizeof(p->comm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
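
/*
 * Editor's sketch of the layout produced above (the usual ia64
 * task/stack co-location; proportions are illustrative only):
 *
 *	mca_data + offset          +---------------------------+
 *	                           | struct task_struct (p)    |
 *	                           | struct thread_info (ti)   |
 *	                           | remainder of the area is  |
 *	                           | the memory stack and the  |
 *	                           | register backing store    |
 *	offset + KERNEL_STACK_SIZE +---------------------------+
 */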
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) /* Caller prevents this from being called after init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static void * __ref mca_bootmem(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /* Do per-CPU MCA-related initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ia64_mca_cpu_init(void *cpu_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) void *pal_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) long sz = sizeof(struct ia64_mca_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) int cpu = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) static int first_time = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * Structure will already be allocated if cpu has been online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * then offlined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (__per_cpu_mca[cpu]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) data = __va(__per_cpu_mca[cpu]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (first_time) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) data = mca_bootmem();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) first_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) data = (void *)__get_free_pages(GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) get_order(sz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) panic("Could not allocate MCA memory for cpu %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) "MCA", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) "INIT", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) __this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * Stash away a copy of the PTE needed to map the per-CPU page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * We may need it during MCA recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) __this_cpu_write(ia64_mca_per_cpu_pte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * Also, stash away a copy of the PAL address and the PTE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * needed to map it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) pal_vaddr = efi_get_pal_addr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) if (!pal_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) __this_cpu_write(ia64_mca_pal_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) GRANULEROUNDDOWN((unsigned long) pal_vaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) __this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) PAGE_KERNEL)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
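
/*
 * Editor's note (inferred from the names, not stated in the original
 * comments): ia64_mca_pal_base and ia64_mca_pal_pte are presumably
 * consumed by the low-level MCA assembly to pin the PAL granule with a
 * translation register while a machine check is handled, which would be
 * why the base is rounded down to a granule boundary with
 * GRANULEROUNDDOWN().
 */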
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) static int ia64_mca_cpu_online(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (!cmc_polling_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) ia64_mca_cmc_vector_enable(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * ia64_mca_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * Do all the system level mca specific initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * 1. Register spinloop and wakeup request interrupt vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * 2. Register OS_MCA handler entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * 3. Register OS_INIT handler entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) *
 * Note that this initialization is done very early, before some kernel
 * services are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * Inputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * Outputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ia64_mca_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct ia64_sal_retval isrv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static struct notifier_block default_init_monarch_nb = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) .notifier_call = default_monarch_init_process,
		.priority = 0,	/* we need to be notified last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) IA64_MCA_DEBUG("%s: begin\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) * Register the rendezvous spinloop and wakeup mechanism with SAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* Register the rendezvous interrupt vector with SAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) SAL_MC_PARAM_MECHANISM_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) IA64_MCA_RENDEZ_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) SAL_MC_PARAM_RZ_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) rc = isrv.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (rc == -2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) printk(KERN_INFO "Increasing MCA rendezvous timeout from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) "%ld to %ld milliseconds\n", timeout, isrv.v0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) timeout = isrv.v0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) printk(KERN_ERR "Failed to register rendezvous interrupt "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) "with SAL (status %ld)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
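
	/*
	 * Editor's note on the loop above: status 0 from
	 * ia64_sal_mc_set_params() means the rendezvous parameters were
	 * accepted; -2 means the requested timeout was too short, with the
	 * platform's suggested value returned in isrv.v0, which the loop
	 * adopts before retrying. Any other status aborts MCA setup.
	 */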
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* Register the wakeup interrupt vector with SAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) SAL_MC_PARAM_MECHANISM_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) IA64_MCA_WAKEUP_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) rc = isrv.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) "(status %ld)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * XXX - disable SAL checksum by setting size to 0; should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) ia64_mc_info.imi_mca_handler_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /* Register the os mca handler with SAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) ia64_mc_info.imi_mca_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) ia64_tpa(mca_hldlr_ptr->gp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) ia64_mc_info.imi_mca_handler_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 0, 0, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) printk(KERN_ERR "Failed to register OS MCA handler with SAL "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) "(status %ld)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * XXX - disable SAL checksum by setting size to 0, should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) * size of the actual init handler in mca_asm.S.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ia64_mc_info.imi_monarch_init_handler_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) ia64_mc_info.imi_slave_init_handler_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) ia64_mc_info.imi_monarch_init_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /* Register the os init handler with SAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) ia64_mc_info.imi_monarch_init_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) ia64_tpa(ia64_getreg(_IA64_REG_GP)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ia64_mc_info.imi_monarch_init_handler_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) ia64_mc_info.imi_slave_init_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) ia64_tpa(ia64_getreg(_IA64_REG_GP)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) ia64_mc_info.imi_slave_init_handler_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) "(status %ld)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (register_die_notifier(&default_init_monarch_nb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) printk(KERN_ERR "Failed to register default monarch INIT process\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /* Initialize the areas set aside by the OS to buffer the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * platform/processor error states for MCA/INIT/CMC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ia64_log_init(SAL_INFO_TYPE_MCA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) ia64_log_init(SAL_INFO_TYPE_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ia64_log_init(SAL_INFO_TYPE_CMC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ia64_log_init(SAL_INFO_TYPE_CPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) mca_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) printk(KERN_INFO "MCA related initialization done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * These pieces cannot be done in ia64_mca_init() because it is called before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * early_irq_init() which would wipe out our percpu irq registrations. But we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * cannot leave them until ia64_mca_late_init() because by then all the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * processors have been brought online and have set their own CMC vectors to
 * point at a non-existent action. Called from arch_early_irq_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) void __init ia64_mca_irq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * Configure the CMCI/P vector and handler. Interrupts for CMC are
 * per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) "cmc_hndlr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) "cmc_poll");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /* Setup the MCA rendezvous interrupt vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 0, "mca_rdzv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) /* Setup the MCA wakeup interrupt vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 0, "mca_wkup");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* Setup the CPEI/P handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) "cpe_poll");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * ia64_mca_late_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) *
 * Opportunity to set up things that require initialization later
 * than ia64_mca_init. Set up a timer to poll for CPEs if the
 * platform doesn't support an interrupt-driven mechanism.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * Inputs : None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * Outputs : Status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) ia64_mca_late_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (!mca_init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* Setup the CMCI/P vector and handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) /* Unmask/enable the vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) cmc_polling_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) ia64_mca_cpu_online, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /* Setup the CPEI/P vector and handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) unsigned int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (cpe_vector >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /* If platform supports CPEI, enable the irq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) irq = local_vector_to_irq(cpe_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) cpe_poll_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) irq_set_status_flags(irq, IRQ_PER_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (request_irq(irq, ia64_mca_cpe_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 0, "cpe_hndlr", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) pr_err("Failed to register cpe_hndlr interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) ia64_cpe_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ia64_mca_register_cpev(cpe_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) printk(KERN_ERR "%s: Failed to find irq for CPE "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) "interrupt handler, vector %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) __func__, cpe_vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* If platform doesn't support CPEI, get the timer going. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (cpe_poll_enabled) {
			ia64_mca_cpe_poll(NULL);	/* kick off the first poll cascade */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
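
/*
 * Editor's summary of ia64_mca_late_init(): when ACPI advertises a CPEI
 * vector and an irq can be resolved for it, corrected platform errors
 * arrive by interrupt and polling stays disabled; otherwise (and unless
 * disable_cpe_poll was given on the command line) the cpe_poll_timer
 * cascade primed by ia64_mca_cpe_poll(NULL) is the only CPE reporting
 * path.
 */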
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) device_initcall(ia64_mca_late_init);