Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards.

The file below is the s390 machine check handler from this kernel tree.

// SPDX-License-Identifier: GPL-2.0
/*
 *   Machine check handler
 *
 *    Copyright IBM Corp. 2000, 2009
 *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *               Cornelia Huck <cornelia.huck@de.ibm.com>,
 *               Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/ctl_reg.h>
#include <asm/asm-offsets.h>
#include <linux/kvm_host.h>

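/*
 * Per-CPU machine check state: struct mcck_struct collects the conditions
 * seen by s390_do_machine_check(); s390_handle_mcck() consumes and clears
 * them later.
 */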
struct mcck_struct {
        unsigned int kill_task : 1;
        unsigned int channel_report : 1;
        unsigned int warning : 1;
        unsigned int stp_queue : 1;
        unsigned long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
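/*
 * Slab cache for the machine check extended save areas and the
 * ilog2(size) bits that are ORed into each CPU's MCESA origin in the
 * lowcore (set up in nmi_init(), used again in nmi_alloc_per_cpu()).
 */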
static struct kmem_cache *mcesa_cache;
static unsigned long mcesa_origin_lc;

static inline int nmi_needs_mcesa(void)
{
        return MACHINE_HAS_VX || MACHINE_HAS_GS;
}

static inline unsigned long nmi_get_mcesa_size(void)
{
        if (MACHINE_HAS_GS)
                return MCESA_MAX_SIZE;
        return MCESA_MIN_SIZE;
}

/*
 * The initial machine check extended save area for the boot CPU.
 * It will be replaced by nmi_init() with an allocated structure.
 * The structure is required for machine checks happening early in
 * the boot process.
 */
static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);

void __init nmi_alloc_boot_cpu(struct lowcore *lc)
{
        if (!nmi_needs_mcesa())
                return;
        lc->mcesad = (unsigned long) &boot_mcesa;
        if (MACHINE_HAS_GS)
                lc->mcesad |= ilog2(MCESA_MAX_SIZE);
}

static int __init nmi_init(void)
{
        unsigned long origin, cr0, size;

        if (!nmi_needs_mcesa())
                return 0;
        size = nmi_get_mcesa_size();
        if (size > MCESA_MIN_SIZE)
                mcesa_origin_lc = ilog2(size);
        /* create slab cache for the machine-check-extended-save-areas */
        mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
        if (!mcesa_cache)
                panic("Couldn't create nmi save area cache");
        origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
        if (!origin)
                panic("Couldn't allocate nmi save area");
        /* The pointer is stored with mcesa_bits ORed in */
        kmemleak_not_leak((void *) origin);
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28); /* disable lowcore protection */
        /* Replace boot_mcesa on the boot CPU */
        S390_lowcore.mcesad = origin | mcesa_origin_lc;
        __ctl_load(cr0, 0, 0);
        return 0;
}
early_initcall(nmi_init);

int nmi_alloc_per_cpu(struct lowcore *lc)
{
        unsigned long origin;

        if (!nmi_needs_mcesa())
                return 0;
        origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
        if (!origin)
                return -ENOMEM;
        /* The pointer is stored with mcesa_bits ORed in */
        kmemleak_not_leak((void *) origin);
        lc->mcesad = origin | mcesa_origin_lc;
        return 0;
}

void nmi_free_per_cpu(struct lowcore *lc)
{
        if (!nmi_needs_mcesa())
                return;
        kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
}

static notrace void s390_handle_damage(void)
{
        smp_emergency_stop();
        disabled_wait();
        while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);

/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 */
void s390_handle_mcck(void)
{
        unsigned long flags;
        struct mcck_struct mcck;

        /*
         * Disable machine checks and get the current state of accumulated
         * machine checks. Afterwards delete the old state and enable machine
         * checks again.
         */
        local_irq_save(flags);
        local_mcck_disable();
        mcck = *this_cpu_ptr(&cpu_mcck);
        memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
        local_mcck_enable();
        local_irq_restore(flags);

        if (mcck.channel_report)
                crw_handle_channel_report();
        /*
         * A warning may remain for a prolonged period on the bare iron
         * (actually until the machine is powered off, or the problem is gone),
         * so we just stop listening for the WARNING MCH and avoid continuously
         * being interrupted. One caveat, however, is that we must do this per
         * processor and cannot use the smp version of ctl_clear_bit().
         * On VM we only get one interrupt per virtually presented machine
         * check; though one suffices, we may get one interrupt per
         * (virtual) cpu.
         */
        if (mcck.warning) {     /* WARNING pending ? */
                static int mchchk_wng_posted = 0;

                /* Use single cpu clear, as we cannot handle smp here. */
                __ctl_clear_bit(14, 24);        /* Disable WARNING MCH */
                if (xchg(&mchchk_wng_posted, 1) == 0)
                        kill_cad_pid(SIGPWR, 1);
        }
        if (mcck.stp_queue)
                stp_queue_work();
        if (mcck.kill_task) {
                local_irq_enable();
                printk(KERN_EMERG "mcck: Terminating task because of machine "
                       "malfunction (code 0x%016lx).\n", mcck.mcck_code);
                printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
                       current->comm, current->pid);
                do_exit(SIGSEGV);
        }
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);

/*
 * returns 0 if all required registers are available
 * returns 1 otherwise
 */
static int notrace s390_check_registers(union mci mci, int umode)
{
        union ctlreg2 cr2;
        int kill_task;

        kill_task = 0;

        if (!mci.gr) {
                /*
                 * General purpose registers couldn't be restored and have
                 * unknown contents. Stop system or terminate process.
                 */
                if (!umode)
                        s390_handle_damage();
                kill_task = 1;
        }
        /* Check control registers */
        if (!mci.cr) {
                /*
                 * Control registers have unknown contents.
                 * Can't recover and therefore stopping machine.
                 */
                s390_handle_damage();
        }
        if (!mci.fp) {
                /*
                 * Floating point registers can't be restored. If the
                 * kernel currently uses floating point registers the
                 * system is stopped. If the process has its floating
                 * point registers loaded it is terminated.
                 */
                if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
                        s390_handle_damage();
                if (!test_cpu_flag(CIF_FPU))
                        kill_task = 1;
        }
        if (!mci.fc) {
                /*
                 * Floating point control register can't be restored.
                 * If the kernel currently uses the floating point
                 * registers and needs the FPC register the system is
                 * stopped. If the process has its floating point
                 * registers loaded it is terminated.
                 */
                if (S390_lowcore.fpu_flags & KERNEL_FPC)
                        s390_handle_damage();
                if (!test_cpu_flag(CIF_FPU))
                        kill_task = 1;
        }

        if (MACHINE_HAS_VX) {
                if (!mci.vr) {
                        /*
                         * Vector registers can't be restored. If the kernel
                         * currently uses vector registers the system is
                         * stopped. If the process has its vector registers
                         * loaded it is terminated.
                         */
                        if (S390_lowcore.fpu_flags & KERNEL_VXR)
                                s390_handle_damage();
                        if (!test_cpu_flag(CIF_FPU))
                                kill_task = 1;
                }
        }
        /* Check if access registers are valid */
        if (!mci.ar) {
                /*
                 * Access registers have unknown contents.
                 * Terminating task.
                 */
                kill_task = 1;
        }
        /* Check guarded storage registers */
        cr2.val = S390_lowcore.cregs_save_area[2];
        if (cr2.gse) {
                if (!mci.gs) {
                        /*
                         * Guarded storage register can't be restored and
                         * the current process uses guarded storage.
                         * It has to be terminated.
                         */
                        kill_task = 1;
                }
        }
        /* Check if old PSW is valid */
        if (!mci.wp) {
                /*
                 * Can't tell if we come from user or kernel mode
                 * -> stopping machine.
                 */
                s390_handle_damage();
        }
        /* Check for invalid kernel instruction address */
        if (!mci.ia && !umode) {
                /*
                 * The instruction address got lost while running
                 * in the kernel -> stopping machine.
                 */
                s390_handle_damage();
        }

        if (!mci.ms || !mci.pm || !mci.ia)
                kill_task = 1;

        return kill_task;
}
NOKPROBE_SYMBOL(s390_check_registers);

/*
 * Backup the guest's machine check info to its description block
 */
static void notrace s390_backup_mcck_info(struct pt_regs *regs)
{
        struct mcck_volatile_info *mcck_backup;
        struct sie_page *sie_page;

        /* r14 contains the sie block, which was set in sie64a */
        struct kvm_s390_sie_block *sie_block =
                        (struct kvm_s390_sie_block *) regs->gprs[14];

        if (sie_block == NULL)
                /* Something's seriously wrong, stop system. */
                s390_handle_damage();

        sie_page = container_of(sie_block, struct sie_page, sie_block);
        mcck_backup = &sie_page->mcck_info;
        mcck_backup->mcic = S390_lowcore.mcck_interruption_code &
                                ~(MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE);
        mcck_backup->ext_damage_code = S390_lowcore.external_damage_code;
        mcck_backup->failing_storage_address
                        = S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);

#define MAX_IPD_COUNT   29
#define MAX_IPD_TIME    (5 * 60 * USEC_PER_SEC) /* 5 minutes */

#define ED_STP_ISLAND   6       /* External damage STP island check */
#define ED_STP_SYNC     7       /* External damage STP sync check */

#define MCCK_CODE_NO_GUEST      (MCCK_CODE_CP | MCCK_CODE_EXT_DAMAGE)

/*
 * machine check handler.
 */
int notrace s390_do_machine_check(struct pt_regs *regs)
{
        static int ipd_count;
        static DEFINE_SPINLOCK(ipd_lock);
        static unsigned long long last_ipd;
        struct mcck_struct *mcck;
        unsigned long long tmp;
        union mci mci;
        unsigned long mcck_dam_code;
        int mcck_pending = 0;

        nmi_enter();
        inc_irq_stat(NMI_NMI);
        mci.val = S390_lowcore.mcck_interruption_code;
        mcck = this_cpu_ptr(&cpu_mcck);

        if (mci.sd) {
                /* System damage -> stopping machine */
                s390_handle_damage();
        }

        /*
         * Reinject machine checks caused by instruction-processing damage,
         * including Delayed Access Exception, into the guest instead of
         * damaging the host if they happen in the guest.
         */
        if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
                if (mci.b) {
                        /* Processing backup -> verify if we can survive this */
                        u64 z_mcic, o_mcic, t_mcic;
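                        /*
                         * z_mcic: MCIC bits that must be zero and o_mcic:
                         * bits that must all be one for this machine check
                         * to be survivable; anything else stops the machine.
                         */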
                        z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
                        o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
                                  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
                                  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
                                  1ULL<<16);
                        t_mcic = mci.val;

                        if (((t_mcic & z_mcic) != 0) ||
                            ((t_mcic & o_mcic) != o_mcic)) {
                                s390_handle_damage();
                        }

                        /*
                         * Nullifying exigent condition, therefore we might
                         * retry this instruction.
                         */
                        spin_lock(&ipd_lock);
                        tmp = get_tod_clock();
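                        /* TOD clock delta shifted right by 12 is microseconds */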
                        if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
                                ipd_count++;
                        else
                                ipd_count = 1;
                        last_ipd = tmp;
                        if (ipd_count == MAX_IPD_COUNT)
                                s390_handle_damage();
                        spin_unlock(&ipd_lock);
                } else {
                        /* Processing damage -> stopping machine */
                        s390_handle_damage();
                }
        }
        if (s390_check_registers(mci, user_mode(regs))) {
                /*
                 * Couldn't restore all register contents for the
                 * user space process -> mark task for termination.
                 */
                mcck->kill_task = 1;
                mcck->mcck_code = mci.val;
                mcck_pending = 1;
        }

        /*
         * Backup the machine check's info if it happens when the guest
         * is running.
         */
        if (test_cpu_flag(CIF_MCCK_GUEST))
                s390_backup_mcck_info(regs);

        if (mci.cd) {
                /* Timing facility damage */
                s390_handle_damage();
        }
        if (mci.ed && mci.ec) {
                /* External damage */
                if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
                        mcck->stp_queue |= stp_sync_check();
                if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
                        mcck->stp_queue |= stp_island_check();
                mcck_pending = 1;
        }

        /*
         * Reinject storage related machine checks into the guest if they
         * happen when the guest is running.
         */
        if (!test_cpu_flag(CIF_MCCK_GUEST)) {
                if (mci.se)
                        /* Storage error uncorrected */
                        s390_handle_damage();
                if (mci.ke)
                        /* Storage key-error uncorrected */
                        s390_handle_damage();
                if (mci.ds && mci.fa)
                        /* Storage degradation */
                        s390_handle_damage();
        }
        if (mci.cp) {
                /* Channel report word pending */
                mcck->channel_report = 1;
                mcck_pending = 1;
        }
        if (mci.w) {
                /* Warning pending */
                mcck->warning = 1;
                mcck_pending = 1;
        }

        /*
         * If there are only Channel Report Pending and External Damage
         * machine checks, they will not be reinjected into the guest
         * because they refer to host conditions only.
         */
        mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
        if (test_cpu_flag(CIF_MCCK_GUEST) &&
            (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
                /* Set exit reason code for host's later handling */
                *((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
        }
        clear_cpu_flag(CIF_MCCK_GUEST);

        if (user_mode(regs) && mcck_pending) {
                nmi_exit();
                return 1;
        }

        if (mcck_pending)
                schedule_mcck_handler();

        nmi_exit();
        return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);

static int __init machine_check_init(void)
{
        ctl_set_bit(14, 25);    /* enable external damage MCH */
        ctl_set_bit(14, 27);    /* enable system recovery MCH */
        ctl_set_bit(14, 24);    /* enable warning MCH */
        return 0;
}
early_initcall(machine_check_init);