^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 1996-2000 Russell King - Converted to ARM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Original Copyright (C) 1995 Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/idmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/virt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "reboot.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) typedef void (*phys_reset_t)(unsigned long, bool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * Function pointers to optional machine specific functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) void (*pm_power_off)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) EXPORT_SYMBOL(pm_power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * A temporary stack to use for CPU reset. This is static so that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * don't clobber it with the identity mapping. When running with this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * stack, any references to the current task *will not work* so you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * should really do as little as possible before jumping to your reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) static u64 soft_restart_stack[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static void __soft_restart(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) phys_reset_t phys_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /* Take out a flat memory mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) setup_mm_for_reboot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) /* Clean and invalidate caches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /* Turn off caching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) cpu_proc_fin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /* Push out any further dirty data, and ensure cache is empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) flush_cache_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /* Switch to the identity mapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) /* original stub should be restored by kvm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) phys_reset((unsigned long)addr, is_hyp_mode_available());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) /* Should never get here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) void _soft_restart(unsigned long addr, bool disable_l2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /* Disable interrupts first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) raw_local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) local_fiq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) /* Disable the L2 if we're the last man standing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) if (disable_l2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) outer_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) /* Change to the new stack and continue with the reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) call_with_stack(__soft_restart, (void *)addr, (void *)stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /* Should never get here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) void soft_restart(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) _soft_restart(addr, num_online_cpus() == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * Called by kexec, immediately prior to machine_kexec().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * This must completely disable all secondary CPUs; simply causing those CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * kexec'd kernel to use any and all RAM as it sees fit, without having to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) void machine_shutdown(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) smp_shutdown_nonboot_cpus(reboot_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * Halting simply requires that the secondary CPUs stop performing any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * activity (executing tasks, handling interrupts). smp_send_stop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * achieves this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void machine_halt(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) smp_send_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * Power-off simply requires that the secondary CPUs stop performing any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * activity (executing tasks, handling interrupts). smp_send_stop()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * achieves this. When the system power is turned off, it will take all CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) void machine_power_off(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) smp_send_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (pm_power_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) pm_power_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * Restart requires that the secondary CPUs stop performing any activity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * while the primary CPU resets the system. Systems with a single CPU can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * use soft_restart() as their machine descriptor's .restart hook, since that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * will cause the only available CPU to reset. Systems with multiple CPUs must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * provide a HW restart implementation, to ensure that all CPUs reset at once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * This is required so that any code running after reset on the primary CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * doesn't have to co-ordinate with other CPUs to ensure they aren't still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) * executing pre-reset code, and using RAM that the primary CPU's code wishes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * to use. Implementing such co-ordination would be essentially impossible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) void machine_restart(char *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) smp_send_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) do_kernel_pre_restart(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) do_kernel_restart(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /* Give a grace period for failure to restart of 1s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) mdelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* Whoops - the platform was unable to reboot. Tell the user! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) printk("Reboot failed -- System halted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }