Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

/*
 * This is allocated by cpu_suspend_init(), and used to store a pointer to
 * the 'struct sleep_stack_data' that contains a particular CPU's state.
 */
unsigned long *sleep_save_stash;

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before re-enabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs debug exceptions might have been enabled already,
 * with the HW breakpoint registers' contents still in an unknown state.
 */
static int (*hw_breakpoint_restore)(unsigned int);
void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}
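
/*
 * Example registration (illustrative only, not part of this file): in the
 * upstream arm64 tree the HW breakpoint code installs its per-CPU restore
 * callback through this hook from hw_breakpoint.c, roughly as:
 *
 *	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
 */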

void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/* Restore CnP bit in TTBR1_EL1 */
	if (system_supports_cnp())
		cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/*
	 * PSTATE was not saved over suspend/resume; re-enable any detected
	 * features that might not have been set correctly.
	 */
	__uaccess_enable_hw_pan();
	uao_thread_switch(current);

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly re-enabled
	 * by cpu_suspend()'s local_daif_restore() call.
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);

	/*
	 * On resume, firmware implementing dynamic mitigation will
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	spectre_v4_enable_mitigation(NULL);

	/* Restore additional feature-specific configuration */
	ptrauth_suspend_exit();
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	/* Report any MTE async fault before going to suspend */
	mte_suspend_enter();

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	flags = local_daif_save();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);

		/*
		 * Never gets here, unless the suspend finisher fails.
		 * A successful cpu_suspend() should return from cpu_resume(),
		 * so returning through this code path is considered an error.
		 * If the return value is set to 0, force ret = -EOPNOTSUPP
		 * to make sure a proper error condition is propagated.
		 */
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		RCU_NONIDLE(__cpu_suspend_exit());
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have already been
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when the core started shutdown.
	 */
	local_daif_restore(flags);

	return ret;
}

static int __init cpu_suspend_init(void)
{
	/* sleep_save_stash is an array of physical addresses */
	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
				   GFP_KERNEL);

	if (WARN_ON(!sleep_save_stash))
		return -ENOMEM;

	return 0;
}
early_initcall(cpu_suspend_init);
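
For reference, below is a minimal sketch of how a platform suspend path might use cpu_suspend(). The names my_platform_finisher, my_platform_enter_idle and my_firmware_cpu_suspend are hypothetical placeholders for illustration, not symbols from this tree; the real callers are the firmware/cpuidle glue (e.g. the PSCI suspend path), which follows the same pattern.

/* Hypothetical caller of cpu_suspend(); illustrative only. */
static int my_platform_finisher(unsigned long power_state)
{
	/*
	 * Hand the CPU over to firmware. On success this call never
	 * returns: the core loses context and later re-enters the kernel
	 * through cpu_resume(), which unwinds back out of cpu_suspend().
	 * Returning from here is treated as a failure by cpu_suspend().
	 */
	return my_firmware_cpu_suspend(power_state);	/* hypothetical firmware hook */
}

static int my_platform_enter_idle(unsigned long power_state)
{
	int ret;

	/*
	 * cpu_suspend() returns 0 once the CPU has been woken up and
	 * resumed through cpu_resume(); a negative value means the
	 * finisher failed (a finisher that returns 0 is forced to
	 * -EOPNOTSUPP, as seen in cpu_suspend() above).
	 */
	ret = cpu_suspend(power_state, my_platform_finisher);
	if (ret)
		pr_err("CPU suspend failed: %d\n", ret);

	return ret;
}

The design point this illustrates: the finisher is only responsible for issuing the power-down request, so only the error path ever returns to cpu_suspend(); the success path comes back through cpu_resume() and __cpu_suspend_exit().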