Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Architecture specific (PPC64) functions for kexec based crash dumps.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2005, IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Created by: Haren Myneni
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/crash_dump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/machdep.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/kexec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/prom.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/setjmp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * The primary CPU waits a while for all secondary CPUs to enter. This is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * avoid sending an IPI if the secondary CPUs are entering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * crash_kexec_secondary on their own (eg via a system reset).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * The secondary timeout has to be longer than the primary. Both timeouts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * in milliseconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  */
#define PRIMARY_TIMEOUT		500
#define SECONDARY_TIMEOUT	1000

/* How long (ms) the primary waits for all CPUs to answer the crash IPI. */
#define IPI_TIMEOUT		10000
/* How long (ms) to wait for secondaries to reach real mode before kexec. */
#define REAL_MODE_TIMEOUT	10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
/* Set to 1 by the primary CPU once secondaries may proceed to shut down. */
static int time_to_dump;
/*
 * crash_wake_offline should be set to 1 by platforms that intend to wake
 * up offline cpus prior to jumping to a kdump kernel. Currently powernv
 * sets it to 1, since we want to avoid things from happening when an
 * offline CPU wakes up due to something like an HMI (malfunction error),
 * which propagates to all threads.
 */
int crash_wake_offline;

/* Maximum number of registrable crash shutdown handlers. */
#define CRASH_HANDLER_MAX 3
/* List of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
/* Protects crash_shutdown_handles[] during (un)registration. */
static DEFINE_SPINLOCK(crash_handlers_lock);

/* setjmp() context used to recover from faults taken while running
 * crash shutdown handlers (see handle_fault()). */
static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
/* CPU currently executing fault-protected shutdown code, or -1 if none. */
static int crash_shutdown_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) static int handle_fault(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	if (crash_shutdown_cpu == smp_processor_id())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 		longjmp(crash_shutdown_buf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
/* Count of CPUs that have entered the crash path; the primary waits on it. */
static atomic_t cpus_in_crash;

/*
 * Entered by every secondary CPU on the way down, either via the crash
 * IPI or via crash_kexec_secondary() (e.g. after a system reset). Saves
 * this CPU's register state once, announces arrival, then parks the CPU
 * until the primary signals that the kdump boot is starting.
 */
void crash_ipi_callback(struct pt_regs *regs)
{
	/* Tracks which CPUs already saved state, so re-entry (e.g. via a
	 * system reset) does not overwrite the snapshot with newer regs. */
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		/* First visit on this CPU: capture registers for the vmcore. */
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	/* Tell the primary this CPU has arrived. */
	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Starting the kdump boot.
	 * This barrier is needed to make sure that all CPUs are stopped.
	 */
	while (!time_to_dump)
		cpu_relax();

	/* Give the platform a chance to quiesce this CPU (secondary=1). */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
/*
 * Runs on the crashing (primary) CPU: herd every other CPU into
 * crash_ipi_callback(). First sends a crash IPI and waits up to
 * IPI_TIMEOUT ms; if some CPUs still have not shown up, it asks the
 * operator to trigger a system reset (which forces all CPUs through the
 * 0x100 vector and back into the crash path) and retries once. Gives up
 * rather than waiting forever when a panic timeout is configured.
 */
static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
	int tries = 0;
	int (*old_handler)(struct pt_regs *regs);

	printk(KERN_EMERG "Sending IPI to other CPUs\n");

	/* Platforms that wake offline CPUs must be waited for as well. */
	if (crash_wake_offline)
		ncpus = num_present_cpus() - 1;

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

again:
	/*
	 * FIXME: Until we will have the way to stop other CPUs reliably,
	 * the crash CPU will send an IPI and wait for other CPUs to
	 * respond.
	 */
	msecs = IPI_TIMEOUT;
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

	if (atomic_read(&cpus_in_crash) >= ncpus) {
		printk(KERN_EMERG "IPI complete\n");
		return;
	}

	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
		ncpus - atomic_read(&cpus_in_crash));

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate system reset. We also give up on the
	 * second time through if system reset fail to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take an 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs reexecute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) "
				  "to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
		atomic_set(&cpus_in_crash, 0);
		smp_mb();

		while (atomic_read(&cpus_in_crash) < ncpus)
			cpu_relax();
	}

	/* Back from the longjmp (or all CPUs arrived): restore the hook. */
	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)  * This function will be called by secondary cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) void crash_kexec_secondary(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 	int msecs = SECONDARY_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	/* Wait for the primary crash CPU to signal its progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 	while (crashing_cpu < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		if (--msecs < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			/* No response, kdump image may not have been loaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	crash_ipi_callback(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) #else	/* ! CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 
/* UP build: no other CPUs to stop; only release any held secondaries. */
static void crash_kexec_prepare_cpus(int cpu)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
/* UP build: there are no secondary CPUs to bring into the crash path. */
void crash_kexec_secondary(struct pt_regs *regs)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) #endif	/* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	/* A single REAL_MODE_TIMEOUT ms budget shared across all CPUs. */
	msecs = REAL_MODE_TIMEOUT;
	for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;	/* don't wait for ourselves */

		/* Each secondary reports progress through its paca's
		 * kexec_state; poll until it reaches real mode. */
		while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			/* Skip CPUs that can never get there; honour budget. */
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
/* Non-SMP or non-PPC64 builds: nothing to wait for. */
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)  * Register a function to be called on shutdown.  Only use this if you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)  * can't reset your device in the second kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) int crash_shutdown_register(crash_shutdown_t handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	unsigned int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	spin_lock(&crash_handlers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 		if (!crash_shutdown_handles[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 			/* Insert handle at first empty entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 			crash_shutdown_handles[i] = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	if (i == CRASH_HANDLER_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 		printk(KERN_ERR "Crash shutdown handles full, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 		       "not registered.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	spin_unlock(&crash_handlers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) EXPORT_SYMBOL(crash_shutdown_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) int crash_shutdown_unregister(crash_shutdown_t handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	unsigned int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	spin_lock(&crash_handlers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		if (crash_shutdown_handles[i] == handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 	if (i == CRASH_HANDLER_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		printk(KERN_ERR "Crash shutdown handle not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 		/* Shift handles down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 		for (; i < (CRASH_HANDLER_MAX - 1); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 			crash_shutdown_handles[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 				crash_shutdown_handles[i+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		 * Reset last entry to NULL now that it has been shifted down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 		 * this will allow new handles to be added here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 		crash_shutdown_handles[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	spin_unlock(&crash_handlers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) EXPORT_SYMBOL(crash_shutdown_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 
/*
 * Crash-time machine shutdown: quiesce the machine just enough for the
 * kdump kernel to boot. Runs with the system already broken, so it does
 * the minimum: stop the other CPUs, save this CPU's registers, mask
 * interrupts, and run registered shutdown handlers under fault
 * protection (setjmp/handle_fault).
 */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	/* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
	printk_nmi_enter();

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();

	/*
	 * If we came in via system reset, wait a while for the secondary
	 * CPUs to enter.
	 */
	if (TRAP(regs) == 0x100)
		mdelay(PRIMARY_TIMEOUT);

	crash_kexec_prepare_cpus(crashing_cpu);

	/* Snapshot the crashing CPU's own registers for the vmcore. */
	crash_save_cpu(regs, crashing_cpu);

	/* Release the secondaries parked in crash_ipi_callback(). */
	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely.  Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	/* Handlers are packed from slot 0, so stop at the first NULL. */
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	/* Platform hook for the crashing CPU itself (secondary=0). */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}