Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
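
/*
 * Illustrative sketch (not in the original source): a consumer of the
 * notifier interface above registers a callback that receives a
 * struct machine_check_event pointer as the data argument, roughly as
 * follows. All "example_*" names are hypothetical.
 *
 *	static int example_mce_notify(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct machine_check_event *evt = data;
 *
 *		if (evt->error_type == MCE_ERROR_TYPE_UE)
 *			pr_info("UE event reported on CPU %d\n", evt->cpu);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_mce_nb = {
 *		.notifier_call = example_mce_notify,
 *	};
 *
 *	// at init time:
 *	mce_register_notifier(&example_mce_nb);
 */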

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structure.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			mce->u.ue_error.ignore_event = mce_err->ignore_event;
			machine_check_ue_event(mce);
		}
	}
	return;
}
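
/*
 * Illustrative sketch (not in the original source): a platform machine
 * check handler is expected to decode the error, fill in a struct
 * mce_error_info and hand it to save_mce_event() together with the
 * faulting NIP and (optionally) the effective/physical address. Only
 * names already used in this file appear below; "example_*" is
 * hypothetical and the UE subtype decode is left as a placeholder.
 *
 *	static long example_platform_mce_handler(struct pt_regs *regs)
 *	{
 *		struct mce_error_info mce_err = { 0 };
 *		long handled = 0;
 *
 *		mce_err.error_type = MCE_ERROR_TYPE_UE;
 *		mce_err.severity = MCE_SEV_SEVERE;
 *		mce_err.initiator = MCE_INITIATOR_CPU;
 *		// ... platform-specific decode of the UE subtype ...
 *
 *		save_mce_event(regs, handled, &mce_err, regs->nip,
 *			       regs->dar, ULONG_MAX);
 *		return handled;
 *	}
 */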

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
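
/*
 * Illustrative sketch (not in the original source): per the comment above
 * get_mce_event(), a caller that only wants to inspect the current event
 * passes release == false and frees the slot explicitly once done:
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, false)) {
 *		// ... consume evt, e.g. check evt.disposition ...
 *		release_mce_event();
 *	}
 */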

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

void mce_common_process_ue(struct pt_regs *regs,
			   struct mce_error_info *mce_err)
{
	const struct exception_table_entry *entry;

	entry = search_kernel_exception_table(regs->nip);
	if (entry) {
		mce_err->ignore_event = true;
		regs->nip = extable_fixup(entry);
	}
}

/*
 * Process pending MCE UE events from the per-CPU UE event queue. This
 * function runs from the mce_ue_event_work workqueue, which is scheduled
 * by machine_check_ue_irq_work().
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
		blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh well.
		 *
		 * Don't report this machine check if the caller has
		 * asked us to ignore the event; it has a fixup handler
		 * which will do the appropriate error handling and
		 * reporting.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.ignore_event) {
				__this_cpu_dec(mce_ue_count);
				continue;
			}

			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}
/*
 * Process pending MCE events from the per-CPU MCE event queue. This
 * function runs from irq_work context, queued by
 * machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);

		if (evt->error_type == MCE_ERROR_TYPE_UE &&
		    evt->u.ue_error.ignore_event) {
			__this_cpu_dec(mce_queue_count);
			continue;
		}
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type, *initiator;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
		"scv invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch(evt->initiator) {
	case MCE_INITIATOR_CPU:
		initiator = "CPU";
		break;
	case MCE_INITIATOR_PCI:
		initiator = "PCI";
		break;
	case MCE_INITIATOR_ISA:
		initiator = "ISA";
		break;
	case MCE_INITIATOR_MEMORY:
		initiator = "Memory";
		break;
	case MCE_INITIATOR_POWERMGM:
		initiator = "Power Management";
		break;
	case MCE_INITIATOR_UNKNOWN:
	default:
		initiator = "Unknown";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	case MCE_ERROR_TYPE_DCACHE:
		err_type = "D-Cache";
		subtype = "Unknown";
		break;
	case MCE_ERROR_TYPE_ICACHE:
		err_type = "I-Cache";
		subtype = "Unknown";
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
		level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
		err_type, subtype, dar_str,
		evt->disposition == MCE_DISPOSITION_RECOVERED ?
		"Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
			level, evt->cpu, current->pid, current->comm,
			in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
			level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Display faulty slb contents for SLB errors. */
	if (evt->error_type == MCE_ERROR_TYPE_SLB)
		slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1.
 */
long notrace machine_check_early(struct pt_regs *regs)
{
	long handled = 0;
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);
	/* Do not use nmi_enter/exit for pseries hpte guest */
	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
		nmi_enter();

	hv_nmi_check_nonrecoverable(regs);

	/*
	 * See if platform is capable of handling machine check.
	 */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
		nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	return handled;
}
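
/*
 * Illustrative sketch (not in the original source): a platform opts in to
 * the early handling above by filling the corresponding machdep hook; the
 * handler name below is hypothetical.
 *
 *	define_machine(example) {
 *		.name			= "example",
 *		...
 *		.machine_check_early	= example_machine_check_early,
 *	};
 */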

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	local_paca->hmi_irqs++;

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}