Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
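The file below appears to be arch/powerpc/kernel/process.c from this 5.10.110 tree.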

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}

#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

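/*
 * Strict facility (FP/VMX/VSX) MSR control; enabled with the
 * "ppc_strict_facility_enable" early boot parameter handled below.
 */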
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

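/*
 * Turn the requested facility bits on in the kernel's MSR so the kernel
 * itself may use the unit; on VSX-capable CPUs, enabling FP also enables
 * VSX. Returns the resulting MSR value.
 */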
/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */

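/*
 * AltiVec follows the same pattern as the FP code above: __giveup_altivec()
 * saves the vector state and clears MSR_VEC (and MSR_VSX where present)
 * from the user MSR, giveup_altivec() wraps it with the kernel MSR
 * handling, and flush_altivec_to_thread() syncs the thread_struct copy.
 */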
#ifdef CONFIG_ALTIVEC
static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

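/*
 * Facility bits (FP/VEC/VSX/SPE) that this CPU actually provides;
 * giveup_all() and save_all() use this mask to skip work when the
 * thread has none of them live.
 */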
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_all_available |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;

	return 0;
}
early_initcall(init_msr_all_available);

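/*
 * Save all live facility state (FP, AltiVec, SPE) that @tsk currently
 * owns in the registers back into its thread_struct, via the
 * __giveup_*() helpers.
 */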
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

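/*
 * On Book3S-64 the exception exit path calls restore_math() below to put
 * FP/VMX/VSX state back for the user thread; the should_restore_*()
 * helpers decide which units to reload, based on the thread's
 * load_fp/load_vec counters and on CPU features.
 */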
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_FPU
static bool should_restore_fp(void)
{
	if (current->thread.load_fp) {
		current->thread.load_fp++;
		return true;
	}
	return false;
}

static void do_restore_fp(void)
{
	load_fp_state(&current->thread.fp_state);
}
#else
static bool should_restore_fp(void) { return false; }
static void do_restore_fp(void) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static bool should_restore_altivec(void)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
		current->thread.load_vec++;
		return true;
	}
	return false;
}

static void do_restore_altivec(void)
{
	load_vr_state(&current->thread.vr_state);
	current->thread.used_vr = 1;
}
#else
static bool should_restore_altivec(void) { return false; }
static void do_restore_altivec(void) { }
#endif /* CONFIG_ALTIVEC */

static bool should_restore_vsx(void)
{
	if (cpu_has_feature(CPU_FTR_VSX))
		return true;
	return false;
}
#ifdef CONFIG_VSX
static void do_restore_vsx(void)
{
	current->thread.used_vsr = 1;
}
#else
static void do_restore_vsx(void) { }
#endif /* CONFIG_VSX */

/*
 * The exception exit path calls restore_math() with interrupts hard disabled
 * but the soft irq state not "reconciled". ftrace code that calls
 * local_irq_save/restore causes warnings.
 *
 * Rather than complicate the exit path, just don't trace restore_math. This
 * could be done by having ftrace entry code check for this un-reconciled
 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
 * temporarily fix it up for the duration of the ftrace call.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;
	unsigned long new_msr = 0;

	msr = regs->msr;

	/*
	 * new_msr tracks the facilities that are to be restored. Only reload
	 * if the bit is not set in the user MSR (if it is set, the registers
	 * are live for the user thread).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
		new_msr |= MSR_FP;

	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;

	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;
	}

	if (new_msr) {
		unsigned long fpexc_mode = 0;

		msr_check_and_set(new_msr);

		if (new_msr & MSR_FP) {
			do_restore_fp();

			// This also covers VSX, because VSX implies FP
			fpexc_mode = current->thread.fpexc_mode;
		}

		if (new_msr & MSR_VEC)
			do_restore_altivec();

		if (new_msr & MSR_VSX)
			do_restore_vsx();

		msr_check_and_clear(new_msr);

		regs->msr |= new_msr | fpexc_mode;
	}
}
#endif /* CONFIG_PPC_BOOK3S_64 */

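/*
 * Like giveup_all(), but only copy the live FP/VMX state into the
 * thread_struct; the facility bits in the user MSR are left alone, so
 * the register state stays live for the thread. SPE still goes through
 * __giveup_spe().
 */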
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
	thread_pkey_regs_save(&tsk->thread);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */

static void do_break_handler(struct pt_regs *regs)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct arch_hw_breakpoint *info;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;
	int i;

	/*
	 * If the underlying hw supports only one watchpoint, we know it
	 * caused the exception. 8xx also falls into this category.
	 */
	if (nr_wp_slots() == 1) {
		__set_breakpoint(0, &null_brk);
		current->thread.hw_brk[0] = null_brk;
		current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
		return;
	}

	/* Otherwise find out which DAWR caused the exception and disable it. */
	wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		info = &current->thread.hw_brk[i];
		if (!info->address)
			continue;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			__set_breakpoint(i, &null_brk);
			current->thread.hw_brk[i] = null_brk;
			current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
		}
	}
}

void do_break(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/*
	 * We reach here only when watchpoint exception is generated by ptrace
	 * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
	 * watchpoint is already handled by hw_breakpoint_handler() so we don't
	 * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
	 * we need to manually handle the watchpoint here.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
		do_break_handler(regs);

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

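/*
 * Per-CPU copy of the breakpoints currently programmed into the hardware;
 * switch_hw_breakpoint() compares against it so the debug registers are
 * only rewritten when the incoming thread's breakpoints actually differ.
 */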
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(i, brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	int i;
	struct arch_hw_breakpoint null_brk = {0};

	for (i = 0; i < nr_wp_slots(); i++) {
		thread->hw_brk[i] = null_brk;
		if (ppc_breakpoint_available())
			set_breakpoint(i, &thread->hw_brk[i]);
	}
}

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	/* no need to check hw_len. it's calculated from address and len */
	return true;
}

static void switch_hw_breakpoint(struct task_struct *new)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
					&new->thread.hw_brk[i])))
			continue;

		__set_breakpoint(i, &new->thread.hw_brk[i]);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
	if (IS_ENABLED(CONFIG_PPC_47x))
		isync();
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static inline int set_dabr(struct arch_hw_breakpoint *brk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	unsigned long dabr, dabrx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
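	/*
	 * The low type bits selected by HW_BRK_TYPE_DABR (read/write/translate)
	 * go into DABR alongside the address; the privilege-level type bits
	 * (user/kernel/hyp) are shifted down to form the DABRX value.
	 */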
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	dabrx = ((brk->type >> 3) & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (ppc_md.set_dabr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		return ppc_md.set_dabr(dabr, dabrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	return __set_dabr(dabr, dabrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			       LCTRL1_CRWF_RW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
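	/* Widen the watched range to whole HW_BREAKPOINT_SIZE-aligned units. */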
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (start_addr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		lctrl2 |= LCTRL2_LW0LA_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	else if (end_addr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		lctrl2 |= LCTRL2_LW0LA_E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		lctrl2 |= LCTRL2_LW0LA_EandF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	mtspr(SPRN_LCTRL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
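	/* Neither read nor write watching was requested: leave LCTRL2 cleared. */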
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	mtspr(SPRN_CMPE, start_addr - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	mtspr(SPRN_CMPF, end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	mtspr(SPRN_LCTRL1, lctrl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	mtspr(SPRN_LCTRL2, lctrl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (dawr_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		// Power8 or later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		set_dawr(nr, brk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	else if (IS_ENABLED(CONFIG_PPC_8xx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		set_breakpoint_8xx(brk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		// Power7 or earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		set_dabr(brk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		// Shouldn't happen due to higher level checks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) /* Check if we have DAWR or DABR hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) bool ppc_breakpoint_available(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (dawr_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		return true; /* POWER8 DAWR or POWER9 forced DAWR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		return false; /* POWER9 with DAWR disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	/* DABR: Everything but POWER8 and POWER9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) static inline bool tm_enabled(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 * Use the current MSR TM suspended bit to track if we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * checkpointed state outstanding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 * On signal delivery, we'd normally reclaim the checkpointed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 * state to obtain the stack pointer (see: get_tm_stackpointer()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	 * This will then directly return to userspace without going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 * through __switch_to(). However, if the stack frame is bad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * we need to exit this thread which calls __switch_to() which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 * will again attempt to reclaim the already saved tm state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	 * Hence we need to check that we've not already reclaimed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	 * this state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	 * We do this using the current MSR, rather than tracking it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * some specific thread_struct bit, as it has the additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * benefit of checking for a potential TM bad thing exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (!MSR_TM_SUSPENDED(mfmsr()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	giveup_all(container_of(thr, struct task_struct, thread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	tm_reclaim(thr, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * If we are in a transaction and FP is off then we can't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * used FP inside that transaction. Hence the checkpointed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * state is the same as the live state. We need to copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * live state to the checkpointed state so that when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * transaction is restored, the checkpointed state is correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * and the aborted transaction sees the correct state. We use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * ckpt_regs.msr here as that's what tm_reclaim will use to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 * determine if it's going to write the checkpointed state or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 * not. So either this will write the checkpointed registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	 * or reclaim will. Similarly for VMX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		memcpy(&thr->ckfp_state, &thr->fp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		       sizeof(struct thread_fp_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		memcpy(&thr->ckvr_state, &thr->vr_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		       sizeof(struct thread_vr_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) void tm_reclaim_current(uint8_t cause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	tm_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	tm_reclaim_thread(&current->thread, cause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static inline void tm_reclaim_task(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/* We have to work out if we're switching from/to a task that's in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * middle of a transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * When switching, we need to maintain a second register state in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 * ckvr_state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	struct thread_struct *thr = &tsk->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (!thr->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	if (!MSR_TM_ACTIVE(thr->regs->msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		goto out_and_saveregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	WARN_ON(tm_suspend_disabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 tsk->pid, thr->regs->nip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		 thr->regs->ccr, thr->regs->msr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		 thr->regs->trap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		 tsk->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) out_and_saveregs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	/* Always save the regs here, even if a transaction's not active.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 * This context-switches a thread's TM info SPRs.  We do it here to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * be consistent with the restore path (in recheckpoint) which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * cannot happen later in _switch().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	tm_save_sprs(thr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) extern void __tm_recheckpoint(struct thread_struct *thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) void tm_recheckpoint(struct thread_struct *thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	if (!(thread->regs->msr & MSR_TM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	/* We really can't be interrupted here: the TEXASR registers must not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * change, and later in the trecheckpoint code we run with a userspace R1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * So hard-disable interrupts over this region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	hard_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/* The TM SPRs are restored here, so that TEXASR.FS can be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 * before the trecheckpoint and no explosion occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	tm_restore_sprs(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	__tm_recheckpoint(thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static inline void tm_recheckpoint_new_task(struct task_struct *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (!cpu_has_feature(CPU_FTR_TM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	/* Recheckpoint the registers of the thread we're about to switch to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * If the task was using FP, we non-lazily reload both the original and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * the speculative FP register states.  This is because the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 * doesn't see if/when a TM rollback occurs, so if we take an FP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * unavailable later, we are unable to determine which set of FP regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * need to be restored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (!tm_enabled(new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		tm_restore_sprs(&new->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* Recheckpoint to restore original checkpointed register state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		 new->pid, new->thread.regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	tm_recheckpoint(&new->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	 * The checkpointed state has been restored but the live state has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 * not, so ensure all the math functionality is turned off to trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 * restore_math() to reload it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		 "(kernel msr 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		 new->pid, mfmsr());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static inline void __switch_to_tm(struct task_struct *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		struct task_struct *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	if (cpu_has_feature(CPU_FTR_TM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		if (tm_enabled(prev) || tm_enabled(new))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			tm_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		if (tm_enabled(prev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			prev->thread.load_tm++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			tm_reclaim_task(prev);
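			/*
			 * Lazily drop MSR_TM for the old task once its
			 * load_tm counter wraps back to zero with no
			 * transaction active.
			 */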
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				prev->thread.regs->msr &= ~MSR_TM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		tm_recheckpoint_new_task(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  * This is called if we are on the way out to userspace and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * TIF_RESTORE_TM flag is set.  It checks if we need to reload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * FP and/or vector state and does so if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * If userspace is inside a transaction (whether active or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  * suspended) and FP/VMX/VSX instructions have ever been enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  * inside that transaction, then we have to keep them enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * and keep the FP/VMX/VSX state loaded for as long as the transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * continues.  The reason is that if we didn't, and subsequently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * got an FP/VMX/VSX unavailable interrupt inside a transaction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * we don't know whether it's the same transaction, and thus we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * don't know which of the checkpointed state and the transactional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * state to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) void restore_tm_state(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	unsigned long msr_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	 * This is the only moment we should clear TIF_RESTORE_TM as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 * again; anything else could lead to an incorrect ckpt_msr being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 * saved and therefore incorrect signal contexts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	clear_thread_flag(TIF_RESTORE_TM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (!MSR_TM_ACTIVE(regs->msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
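	/*
	 * Facilities that were enabled at the checkpoint but are currently
	 * off in the live MSR must be re-enabled before returning to the
	 * transaction.
	 */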
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	/* Ensure that restore_math() will restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (msr_diff & MSR_FP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		current->thread.load_fp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		current->thread.load_vec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	restore_math(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	regs->msr |= msr_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) #else
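/* No-op stubs when transactional memory support is not built in. */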
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #define tm_recheckpoint_new_task(new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) #define __switch_to_tm(prev, new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static inline void save_sprs(struct thread_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		t->vrsave = mfspr(SPRN_VRSAVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (cpu_has_feature(CPU_FTR_DSCR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		t->dscr = mfspr(SPRN_DSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		t->bescr = mfspr(SPRN_BESCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		t->ebbhr = mfspr(SPRN_EBBHR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		t->ebbrr = mfspr(SPRN_EBBRR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		t->fscr = mfspr(SPRN_FSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		 * Note that the TAR is not available for use in the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		 * (To provide this, the TAR should be backed up/restored on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		 * exception entry/exit instead, and be in pt_regs.  FIXME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		 * this should be in pt_regs anyway (for debug).)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		t->tar = mfspr(SPRN_TAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	thread_pkey_regs_save(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static inline void restore_sprs(struct thread_struct *old_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				struct thread_struct *new_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	    old_thread->vrsave != new_thread->vrsave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		mtspr(SPRN_VRSAVE, new_thread->vrsave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (cpu_has_feature(CPU_FTR_DSCR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		u64 dscr = get_paca()->dscr_default;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		if (new_thread->dscr_inherit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			dscr = new_thread->dscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (old_thread->dscr != dscr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			mtspr(SPRN_DSCR, dscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		if (old_thread->bescr != new_thread->bescr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			mtspr(SPRN_BESCR, new_thread->bescr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		if (old_thread->ebbhr != new_thread->ebbhr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			mtspr(SPRN_EBBHR, new_thread->ebbhr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		if (old_thread->ebbrr != new_thread->ebbrr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			mtspr(SPRN_EBBRR, new_thread->ebbrr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		if (old_thread->fscr != new_thread->fscr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			mtspr(SPRN_FSCR, new_thread->fscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		if (old_thread->tar != new_thread->tar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			mtspr(SPRN_TAR, new_thread->tar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	    old_thread->tidr != new_thread->tidr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		mtspr(SPRN_TIDR, new_thread->tidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	thread_pkey_regs_restore(new_thread, old_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct task_struct *__switch_to(struct task_struct *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct task_struct *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	struct thread_struct *new_thread, *old_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	struct task_struct *last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct ppc64_tlb_batch *batch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	new_thread = &new->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	old_thread = &current->thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	WARN_ON(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	batch = this_cpu_ptr(&ppc64_tlb_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	if (batch->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (batch->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			__flush_tlb_pending(batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		batch->active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	 * On POWER9 the copy-paste buffer can only paste into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	 * foreign real addresses, so unprivileged processes can not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	 * see the data or use it in any way unless they have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 * foreign real mappings. If the new process has the foreign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 * real address mappings, we must issue a cp_abort to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * any state and prevent snooping, corruption or a covert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 * channel. ISA v3.1 supports paste into local memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			atomic_read(&new->mm->context.vas_windows)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		asm volatile(PPC_CP_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #endif /* CONFIG_PPC_BOOK3S_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	switch_booke_debug_regs(&new->thread.debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * schedule DABR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) #ifndef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	switch_hw_breakpoint(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) #endif /* CONFIG_HAVE_HW_BREAKPOINT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	 * We need to save SPRs before treclaim/trecheckpoint as these will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	 * change a number of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	save_sprs(&prev->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	/* Save FPU, Altivec, VSX and SPE state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	giveup_all(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	__switch_to_tm(prev, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (!radix_enabled()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		 * We can't take a PMU exception inside _switch() since there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		 * is a window where the kernel stack SLB and the kernel stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		 * are out of sync. Hard disable here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		hard_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 * Call restore_sprs() before calling _switch(). If we move it after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 * _switch() then we miss out on calling it for new tasks. The reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	 * for this is we manually create a stack frame for new tasks that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	 * directly returns through ret_from_fork() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 * ret_from_kernel_thread(). See copy_thread() for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	restore_sprs(old_thread, new_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	last = _switch(old_thread, new_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * Nothing after _switch will be run for newly created tasks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 * because they switch directly to ret_from_fork/ret_from_kernel_thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	 * etc. Code added here should have a comment explaining why that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	 * okay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	 * This applies to a process that was context switched while inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	 * deactivated above, before _switch(). This will never be the case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	 * for new tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		batch = this_cpu_ptr(&ppc64_tlb_batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		batch->active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	 * Math facilities are masked out of the child MSR in copy_thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	 * A new task does not need to call restore_math() because it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	 * fault them back in on demand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (current->thread.regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		restore_math(current->thread.regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) #endif /* CONFIG_PPC_BOOK3S_64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	return last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #define NR_INSN_TO_PRINT	16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static void show_instructions(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	unsigned long nip = regs->nip;
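	/*
	 * Start the dump NR_INSN_TO_PRINT * 3/4 instructions before NIP so
	 * the faulting instruction lands towards the end of the window.
	 */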
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	printk("Instruction dump:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 * If we were executing with the MMU off for instructions, adjust pc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 * rather than printing XXXXXXXX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		pc = (unsigned long)phys_to_virt(pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		nip = (unsigned long)phys_to_virt(regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		int instr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
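		/* Eight instructions per output line. */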
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		if (!(i % 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		if (!__kernel_text_address(pc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		    get_kernel_nofault(instr, (const void *)pc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			pr_cont("XXXXXXXX ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			if (nip == pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 				pr_cont("<%08x> ", instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 				pr_cont("%08x ", instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		pc += sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) void show_user_instructions(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	unsigned long pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	int n = NR_INSN_TO_PRINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct seq_buf s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	char buf[96]; /* enough for 8 times 9 + 2 chars */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
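	/* As in show_instructions(): start 3/4 of the window before NIP. */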
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	seq_buf_init(&s, buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		seq_buf_clear(&s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			int instr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			if (copy_from_user_nofault(&instr, (void __user *)pc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 					sizeof(instr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 				seq_buf_printf(&s, "XXXXXXXX ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		if (!seq_buf_has_overflowed(&s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 			pr_info("%s[%d]: code: %s\n", current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 				current->pid, s.buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct regbit {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	unsigned long bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) static struct regbit msr_bits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	{MSR_SF,	"SF"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	{MSR_HV,	"HV"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	{MSR_VEC,	"VEC"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	{MSR_VSX,	"VSX"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	{MSR_CE,	"CE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	{MSR_EE,	"EE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	{MSR_PR,	"PR"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	{MSR_FP,	"FP"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	{MSR_ME,	"ME"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) #ifdef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	{MSR_DE,	"DE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	{MSR_SE,	"SE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	{MSR_BE,	"BE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	{MSR_IR,	"IR"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	{MSR_DR,	"DR"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	{MSR_PMM,	"PMM"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) #ifndef CONFIG_BOOKE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	{MSR_RI,	"RI"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	{MSR_LE,	"LE"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	{0,		NULL}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	const char *s = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	for (; bits->bit; ++bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		if (val & bits->bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			pr_cont("%s%s", s, bits->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			s = sep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static struct regbit msr_tm_bits[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	{MSR_TS_T,	"T"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	{MSR_TS_S,	"S"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	{MSR_TM,	"E"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	{0,		NULL}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static void print_tm_bits(unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  * This only prints something if at least one of the TM bits is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * Inside the TM[], the output means:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  *   E: Enabled		(bit 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  *   S: Suspended	(bit 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  *   T: Transactional	(bit 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		pr_cont(",TM[");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		print_bits(val, msr_tm_bits, "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		pr_cont("]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static void print_tm_bits(unsigned long val) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void print_msr_bits(unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	pr_cont("<");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	print_bits(val, msr_bits, ",");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	print_tm_bits(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	pr_cont(">");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) #define REG		"%016lx"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) #define REGS_PER_LINE	4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) #define LAST_VOLATILE	13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) #define REG		"%08lx"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) #define REGS_PER_LINE	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) #define LAST_VOLATILE	12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) #endif
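/*
 * When FULL_REGS() is false only the volatile GPRs are valid, so the
 * register dump in show_regs() stops at LAST_VOLATILE.
 */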
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) void show_regs(struct pt_regs * regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	int i, trap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	show_regs_print_info(KERN_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	       regs->nip, regs->link, regs->ctr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	       regs, regs->trap, print_tainted(), init_utsname()->release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	printk("MSR:  "REG" ", regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	print_msr_bits(regs->msr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	trap = TRAP(regs);
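	/* For non-syscall traps the CFAR is saved in orig_gpr3 on entry. */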
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if (trap == 0x200 || trap == 0x300 || trap == 0x600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	pr_cont("IRQMASK: %lx ", regs->softe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (MSR_TM_ACTIVE(regs->msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	for (i = 0;  i < 32;  i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		if ((i % REGS_PER_LINE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			pr_cont("\nGPR%02d: ", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		pr_cont(REG " ", regs->gpr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	 * Look up NIP late so we have the best chance of getting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	 * above info out without failing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (IS_ENABLED(CONFIG_KALLSYMS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	if (!user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		show_instructions(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) void flush_thread(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	flush_ptrace_hw_breakpoint(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) #else /* CONFIG_HAVE_HW_BREAKPOINT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	set_debug_reg_defaults(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) #endif /* CONFIG_HAVE_HW_BREAKPOINT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) void arch_setup_new_exec(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	if (radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	hash__setup_new_exec();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * Assign a TIDR (thread ID) for task @t and set it in the thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * structure. For now, we only support setting TIDR for 'current' task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * Since the TID value is a truncated form of its PID, it is possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * (but unlikely) for 2 threads to have the same TID. In the unlikely event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  * that 2 threads share the same TID and are waiting, one of the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * cases will happen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  * 1. The correct thread is running, the wrong thread is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  * In this situation, the correct thread is woken and proceeds to pass its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  * condition check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  * 2. Neither thread is running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * In this situation, neither thread will be woken. When scheduled, the waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * threads will execute either a wait, which will return immediately, followed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  * by a condition check, which will pass for the correct thread and fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * for the wrong thread, or they will execute the condition check immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  * 3. The wrong thread is running, the correct thread is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * The wrong thread will be woken, but will fail its condition check and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  * re-execute wait. The correct thread, when scheduled, will execute either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  * its condition check (which will pass), or wait, which returns immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  * when called the first time after the thread is scheduled, followed by its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  * condition check (which will pass).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * 4. Both threads are running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  * Both threads will be woken. The wrong thread will fail its condition check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  * and execute another wait, while the correct thread will pass its condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * @t: the task to set the thread ID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) int set_thread_tidr(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	if (t != current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	if (t->thread.tidr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	t->thread.tidr = (u16)task_pid_nr(t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	mtspr(SPRN_TIDR, t->thread.tidr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) EXPORT_SYMBOL_GPL(set_thread_tidr);
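/*
 * Usage sketch (illustrative; callers live outside this file): the
 * return value must be checked, since the call fails on CPUs without
 * CPU_FTR_P9_TIDR or when the task is not 'current'.
 *
 *	rc = set_thread_tidr(current);
 *	if (rc)
 *		return rc;
 *	(current->thread.tidr now holds the value written to SPRN_TIDR)
 */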
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #endif /* CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) release_thread(struct task_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)  * this gets called so that we can store coprocessor state into memory and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)  * copy the current task into the new thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	flush_all_to_thread(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	 * flush but it removes the checkpointed state from the current CPU and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	 * transitions the CPU out of TM mode.  Hence we need to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	 * tm_recheckpoint_new_task() (on the same task) to restore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	 * checkpointed state back and the TM mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * Can't pass dst because it isn't ready. Doesn't matter, passing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * dst is only important for __switch_to()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	__switch_to_tm(src, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	*dst = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	clear_task_ebb(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	unsigned long sp_vsid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 			<< SLB_VSID_SHIFT_1T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			<< SLB_VSID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	sp_vsid |= SLB_VSID_KERNEL | llp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	p->thread.ksp_vsid = sp_vsid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
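/*
 * Note: the ksp_vsid computed above is meaningful only on the hash MMU
 * (the function returns early under radix); it lets the context-switch
 * path install the SLB entry for the new task's kernel stack without
 * recomputing the VSID.
 */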
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  * Copy a thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  * Copy architecture-specific thread state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int copy_thread(unsigned long clone_flags, unsigned long usp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		unsigned long kthread_arg, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		unsigned long tls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	struct pt_regs *childregs, *kregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	extern void ret_from_fork(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	extern void ret_from_fork_scv(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	extern void ret_from_kernel_thread(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	void (*f)(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	struct thread_info *ti = task_thread_info(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	klp_init_thread_info(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	/* Copy registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	sp -= sizeof(struct pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	childregs = (struct pt_regs *) sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (unlikely(p->flags & PF_KTHREAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		/* kernel thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		memset(childregs, 0, sizeof(struct pt_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		/* function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (usp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			childregs->gpr[14] = ppc_function_entry((void *)usp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		clear_tsk_thread_flag(p, TIF_32BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		childregs->softe = IRQS_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		childregs->gpr[15] = kthread_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		p->thread.regs = NULL;	/* no user register state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		ti->flags |= _TIF_RESTOREALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		f = ret_from_kernel_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		/* user thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		struct pt_regs *regs = current_pt_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		CHECK_FULL_REGS(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		*childregs = *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		if (usp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			childregs->gpr[1] = usp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		p->thread.regs = childregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		/* 64s sets this in ret_from_fork */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			childregs->gpr[3] = 0;  /* Result from fork() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		if (clone_flags & CLONE_SETTLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			if (!is_32bit_task())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 				childregs->gpr[13] = tls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 				childregs->gpr[2] = tls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		if (trap_is_scv(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			f = ret_from_fork_scv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			f = ret_from_fork;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	sp -= STACK_FRAME_OVERHEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	 * The way this works is that at some point in the future
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	 * some task will call _switch to switch to the new task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 * That will pop off the stack frame created below and start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 * the new task running at ret_from_fork.  The new task will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	 * do some housekeeping and then return from the fork or clone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	 * system call, using the stack frame created above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	((unsigned long *)sp)[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	sp -= sizeof(struct pt_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	kregs = (struct pt_regs *) sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	sp -= STACK_FRAME_OVERHEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	p->thread.ksp = sp;
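	/*
	 * Sketch of the child's kernel stack as built above (highest
	 * addresses first; this mirrors the code, it is not an ABI
	 * statement):
	 *
	 *	childregs (struct pt_regs)   user/kthread register image
	 *	STACK_FRAME_OVERHEAD         back-chain word cleared to 0
	 *	kregs (struct pt_regs)       minimal frame consumed by _switch
	 *	STACK_FRAME_OVERHEAD         p->thread.ksp points here
	 */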
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) #ifdef CONFIG_PPC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	p->thread.ksp_limit = (unsigned long)end_of_stack(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) #ifdef CONFIG_HAVE_HW_BREAKPOINT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	for (i = 0; i < nr_wp_slots(); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		p->thread.ptrace_bps[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	p->thread.fp_save_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	p->thread.vr_save_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	setup_ksp_vsid(p, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (cpu_has_feature(CPU_FTR_DSCR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		p->thread.dscr_inherit = current->thread.dscr_inherit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		p->thread.dscr = mfspr(SPRN_DSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		childregs->ppr = DEFAULT_PPR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	p->thread.tidr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	kregs->nip = ppc_function_entry(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) void preload_new_slb_context(unsigned long start, unsigned long sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  * Set up a thread for executing a new program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		preload_new_slb_context(start, sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	 * If we exec out of a kernel thread then thread.regs will not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	 * set.  Do it now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	if (!current->thread.regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		current->thread.regs = regs - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	 * Clear any transactional state, we're exec()ing. The cause is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	 * not important as there will never be a recheckpoint so it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	 * user visible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (MSR_TM_SUSPENDED(mfmsr()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		tm_reclaim_current(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	memset(regs->gpr, 0, sizeof(regs->gpr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	regs->ctr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	regs->link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	regs->xer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	regs->ccr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	regs->gpr[1] = sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	 * We have just cleared all the nonvolatile GPRs, so make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	 * FULL_REGS(regs) return true.  This is necessary to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	 * ptrace to examine the thread immediately after exec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	SET_FULL_REGS(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) #ifdef CONFIG_PPC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	regs->mq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	regs->nip = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	regs->msr = MSR_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	if (!is_32bit_task()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		unsigned long entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		if (is_elf2_task()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 			/* Look ma, no function descriptors! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			entry = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 			 * Ulrich says:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			 *   The latest iteration of the ABI requires that when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			 *   calling a function (at its global entry point),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			 *   the caller must ensure r12 holds the entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 			 *   address (so that the function can quickly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 			 *   establish addressability).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			regs->gpr[12] = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 			/* Make sure that's restored on entry to userspace. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			set_thread_flag(TIF_RESTOREALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			unsigned long toc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			/* start is a relocated pointer to the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 			 * descriptor for the elf _start routine.  The first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			 * entry in the function descriptor is the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			 * address of _start and the second entry is the TOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			 * value we need to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			__get_user(entry, (unsigned long __user *)start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			__get_user(toc, (unsigned long __user *)start+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			/* Check whether the e_entry function descriptor entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			 * need to be relocated before we can use them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 			if (load_addr != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 				entry += load_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 				toc   += load_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			regs->gpr[2] = toc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		regs->nip = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		regs->msr = MSR_USER64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		regs->nip = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		regs->gpr[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		regs->msr = MSR_USER32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) #ifdef CONFIG_VSX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	current->thread.used_vsr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	current->thread.load_slb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	current->thread.load_fp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	current->thread.fp_save_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) #ifdef CONFIG_ALTIVEC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	current->thread.vr_save_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	current->thread.vrsave = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	current->thread.used_vr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	current->thread.load_vec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) #endif /* CONFIG_ALTIVEC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	current->thread.acc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	current->thread.spefscr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	current->thread.used_spe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) #endif /* CONFIG_SPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	current->thread.tm_tfhar = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	current->thread.tm_texasr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	current->thread.tm_tfiar = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	current->thread.load_tm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	thread_pkey_regs_init(&current->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) EXPORT_SYMBOL(start_thread);
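/*
 * Note: start_thread() is invoked by the binfmt loaders (e.g.
 * fs/binfmt_elf.c) once the new executable image is mapped, pointing
 * the user registers at the program's entry point and initial stack,
 * roughly:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 */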
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		| PR_FP_EXC_RES | PR_FP_EXC_INV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	struct pt_regs *regs = tsk->thread.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	/* This is a bit hairy.  If we are an SPE enabled processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	 * (have embedded fp) we store the IEEE exception enable flags in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	 * mode (async, precise, disabled) for 'Classic' FP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	if (val & PR_FP_EXC_SW_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		if (cpu_has_feature(CPU_FTR_SPE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			 * When the sticky exception bits are set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			 * directly by userspace, it must call prctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 			 * in the existing prctl settings) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			 * the bits being set).  <fenv.h> functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			 * saving and restoring the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			 * floating-point environment need to do so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			 * anyway to restore the prctl settings from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			 * the saved environment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			tsk->thread.fpexc_mode = val &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	/* On a CONFIG_SPE build this does not hurt us.  The bits that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	 * __pack_fe01 uses do not overlap with bits used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	 * on CONFIG_SPE implementations are reserved so writing to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	 * them does not change anything. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (val > PR_FP_EXC_PRECISE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	tsk->thread.fpexc_mode = __pack_fe01(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	if (regs != NULL && (regs->msr & MSR_FP) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			| tsk->thread.fpexc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	unsigned int val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		if (cpu_has_feature(CPU_FTR_SPE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			 * When the sticky exception bits are set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			 * directly by userspace, it must call prctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			 * in the existing prctl settings) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			 * the bits being set).  <fenv.h> functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			 * saving and restoring the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			 * floating-point environment need to do so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			 * anyway to restore the prctl settings from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			 * the saved environment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) #ifdef CONFIG_SPE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			val = tsk->thread.fpexc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		val = __unpack_fe01(tsk->thread.fpexc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	return put_user(val, (unsigned int __user *) adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
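/*
 * Userspace reaches the two helpers above through prctl(). A minimal
 * sketch for the classic-FP path (PR_FP_EXC_PRECISE selects precise
 * trapping; the SPE/PR_FP_EXC_SW_ENABLE path takes individual
 * exception-enable bits instead):
 *
 *	#include <sys/prctl.h>
 *
 *	unsigned int mode;
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */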
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) int set_endian(struct task_struct *tsk, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	struct pt_regs *regs = tsk->thread.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (regs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	if (val == PR_ENDIAN_BIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		regs->msr &= ~MSR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		regs->msr |= MSR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) int get_endian(struct task_struct *tsk, unsigned long adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	struct pt_regs *regs = tsk->thread.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	    !cpu_has_feature(CPU_FTR_REAL_LE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (regs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	if (regs->msr & MSR_LE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		if (cpu_has_feature(CPU_FTR_REAL_LE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			val = PR_ENDIAN_LITTLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			val = PR_ENDIAN_PPC_LITTLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		val = PR_ENDIAN_BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	return put_user(val, (unsigned int __user *)adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
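/*
 * Matching prctl() sketch for the endianness helpers above; the set
 * call returns -EINVAL unless the CPU advertises the corresponding
 * CPU_FTR_REAL_LE / CPU_FTR_PPC_LE feature:
 *
 *	unsigned int val;
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *	prctl(PR_GET_ENDIAN, (unsigned long)&val);
 */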
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	tsk->thread.align_ctl = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
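/*
 * Likewise for the alignment-control helpers above (sketch;
 * PR_UNALIGN_SIGBUS asks for a signal instead of a silent fixup):
 *
 *	unsigned int ctl;
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 *	prctl(PR_GET_UNALIGN, (unsigned long)&ctl);
 */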
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 				  unsigned long nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	unsigned long stack_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	unsigned long cpu = task_cpu(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	stack_page = (unsigned long)hardirq_ctx[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	stack_page = (unsigned long)softirq_ctx[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 					unsigned long nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	unsigned long stack_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	unsigned long cpu = task_cpu(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) # ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) # endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) int validate_sp(unsigned long sp, struct task_struct *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		       unsigned long nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	unsigned long stack_page = (unsigned long)task_stack_page(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	if (sp < THREAD_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	if (valid_irq_stack(sp, p, nbytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	return valid_emergency_stack(sp, p, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) EXPORT_SYMBOL(validate_sp);
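/*
 * validate_sp() is the guard used by the stack walkers below: a frame
 * pointer is accepted only if at least @nbytes of it fit inside the
 * task's own stack, one of the per-CPU irq stacks, or (on 64-bit) an
 * emergency stack.
 */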
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static unsigned long __get_wchan(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	unsigned long ip, sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	if (!p || p == current || p->state == TASK_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	sp = p->thread.ksp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		sp = *(unsigned long *)sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		    p->state == TASK_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			if (!in_sched_functions(ip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 				return ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	} while (count++ < 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) unsigned long get_wchan(struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	if (!try_get_task_stack(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	ret = __get_wchan(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	put_task_stack(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
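/*
 * Note: get_wchan() backs the "wchan" (wait channel) value exposed
 * through procfs, e.g. /proc/<pid>/wchan and "ps -o wchan", for tasks
 * that are currently sleeping.
 */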
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) void show_stack(struct task_struct *tsk, unsigned long *stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		const char *loglvl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	unsigned long sp, ip, lr, newsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	int firstframe = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	unsigned long ret_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	int ftrace_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	if (tsk == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	if (!try_get_task_stack(tsk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	sp = (unsigned long) stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	if (sp == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		if (tsk == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 			sp = current_stack_frame();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 			sp = tsk->thread.ksp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	lr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	printk("%sCall Trace:\n", loglvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		stack = (unsigned long *) sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		newsp = stack[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		ip = stack[STACK_FRAME_LR_SAVE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		if (!firstframe || ip != lr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			printk("%s["REG"] ["REG"] %pS",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 				loglvl, sp, ip, (void *)ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			ret_addr = ftrace_graph_ret_addr(current,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 						&ftrace_idx, ip, stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			if (ret_addr != ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 				pr_cont(" (%pS)", (void *)ret_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			if (firstframe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 				pr_cont(" (unreliable)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		firstframe = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		 * See if this is an exception frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		 * We look for the "regshere" marker in the current frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 			struct pt_regs *regs = (struct pt_regs *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 				(sp + STACK_FRAME_OVERHEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 			lr = regs->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 			printk("%s--- interrupt: %lx at %pS\n    LR = %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			       loglvl, regs->trap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 			       (void *)regs->nip, (void *)lr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			firstframe = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		sp = newsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	} while (count++ < kstack_depth_to_print);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	put_task_stack(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /* Called with hard IRQs off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) void notrace __ppc64_runlatch_on(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	struct thread_info *ti = current_thread_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		 * Least significant bit (RUN) is the only writable bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		 * earliest ISA where this is the case, but it's convenient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		 * Some architectures (e.g., Cell) have writable fields other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		 * than RUN, so do the read-modify-write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		ctrl = mfspr(SPRN_CTRLF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		ctrl |= CTRL_RUNLATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		mtspr(SPRN_CTRLT, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	ti->local_flags |= _TLF_RUNLATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) /* Called with hard IRQs off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) void notrace __ppc64_runlatch_off(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	struct thread_info *ti = current_thread_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	ti->local_flags &= ~_TLF_RUNLATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		mtspr(SPRN_CTRLT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		unsigned long ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		ctrl = mfspr(SPRN_CTRLF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		ctrl &= ~CTRL_RUNLATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		mtspr(SPRN_CTRLT, ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) #endif /* CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) unsigned long arch_align_stack(unsigned long sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		sp -= get_random_int() & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	return sp & ~0xf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
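/*
 * Example: with 4K pages, the subtraction above shifts the initial
 * user stack down by 0..4095 bytes (get_random_int() & ~PAGE_MASK
 * keeps only the sub-page bits) before the final 16-byte alignment.
 */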
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static inline unsigned long brk_rnd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	unsigned long rnd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	/* 8MB for 32bit, 1GB for 64bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	if (is_32bit_task())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	return rnd << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) }
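/*
 * Worked example: the random value above is a page count, so after the
 * PAGE_SHIFT shift the heap base moves by up to 2^23 bytes (8MB) for
 * 32-bit tasks and up to 2^30 bytes (1GB) for 64-bit tasks, in
 * page-sized steps.
 */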
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) unsigned long arch_randomize_brk(struct mm_struct *mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	unsigned long base = mm->brk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	unsigned long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) #ifdef CONFIG_PPC_BOOK3S_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	 * If we are using 1TB segments and we are allowed to randomise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	 * the heap, we can put it above 1TB so it is backed by a 1TB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	 * segment. Otherwise the heap will be in the bottom 1TB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	 * which always uses 256MB segments and this may result in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 * performance penalty. We don't need to worry about radix. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	ret = PAGE_ALIGN(base + brk_rnd());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (ret < mm->brk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		return mm->brk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)