Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * arch/xtensa/kernel/process.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Xtensa Processor version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * License.  See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Copyright (C) 2001 - 2005 Tensilica Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Chris Zankel <chris@zankel.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * Kevin Chea
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/stddef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/hw_breakpoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/prctl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <linux/init_task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <linux/mqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/rcupdate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include <asm/platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <asm/mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <asm/regs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include <asm/hw_breakpoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
/* Return paths for newly created threads; defined outside this file.
 * copy_thread() below points the child's dummy frame at one of these. */
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

/* Platform power-off hook; stays NULL until a platform/driver installs one. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #ifdef CONFIG_STACKPROTECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #include <linux/stackprotector.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) unsigned long __stack_chk_guard __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) EXPORT_SYMBOL(__stack_chk_guard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) #if XTENSA_HAVE_COPROCESSORS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) void coprocessor_release_all(struct thread_info *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	unsigned long cpenable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	/* Make sure we don't switch tasks during this operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	/* Walk through all cp owners and release it for the requested one. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	cpenable = ti->cpenable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	for (i = 0; i < XCHAL_CP_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		if (coprocessor_owner[i] == ti) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 			coprocessor_owner[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 			cpenable &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	ti->cpenable = cpenable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	if (ti == current_thread_info())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		xtensa_set_sr(0, cpenable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
/*
 * Save the live register state of every coprocessor owned by @ti back to
 * memory via coprocessor_flush().
 *
 * NOTE: the second argument of xtensa_get_sr()/xtensa_set_sr() is the
 * special-register *name* token (stringified into the rsr/wsr instruction),
 * not the local variable that shadows it — presumably intentional; keep the
 * local's name in sync with the SR name.
 */
void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable, old_cpenable;
	int i;

	/* No task switch while the hardware CPENABLE is temporarily widened. */
	preempt_disable();

	/* Enable all coprocessors @ti may own so their registers are readable. */
	old_cpenable = xtensa_get_sr(cpenable);
	cpenable = ti->cpenable;
	xtensa_set_sr(cpenable, cpenable);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		/* Bit 0 of the right-shifted mask corresponds to coprocessor i. */
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}
	/* Restore the CPENABLE value that was live on entry. */
	xtensa_set_sr(old_cpenable, cpenable);

	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
/*
 * Powermanagement idle function, if any is provided by the platform.
 */
void arch_cpu_idle(void)
{
	/* Delegate entirely to the platform-specific idle implementation. */
	platform_idle();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
/*
 * This is called when the thread calls exit().
 */
void exit_thread(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	/* Drop coprocessor ownership so no stale owner pointer to the dying
	 * task's thread_info remains in coprocessor_owner[]. */
	coprocessor_release_all(task_thread_info(tsk));
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
/*
 * Flush thread state. This is called when a thread does an execve()
 * Note that we flush coprocessor registers for the case execve fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	/* Order matters: save live coprocessor state, then give up ownership. */
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
#endif
	/* Remove any ptrace-installed hardware breakpoints for this task. */
	flush_ptrace_hw_breakpoint(current);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	/* Write @src's live coprocessor registers back to memory before the
	 * struct copy below, so the duplicate reflects current state. */
	coprocessor_flush_all(task_thread_info(src));
#endif
	/* Whole-struct assignment; this function cannot fail. */
	*dst = *src;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  * Copy thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * There are two modes in which this function is called:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * 1) Userspace thread creation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *    regs != NULL, usp_thread_fn is userspace stack pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  *    It is expected to copy parent regs (in case CLONE_VM is not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  *    in the clone_flags) and set up passed usp in the childregs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  * 2) Kernel thread creation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  *    regs == NULL, usp_thread_fn is the function to run in the new thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  *    and thread_fn_arg is its parameter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  *    childregs are not used for the kernel threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  * The stack layout for the new thread looks like this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)  *	+------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)  *	|       childregs        |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)  *	+------------------------+ <- thread.sp = sp in dummy-frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)  *	|      dummy-frame       |    (saved in dummy-frame spill-area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)  *	+------------------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)  * We create a dummy frame to return to either ret_from_fork or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)  *   ret_from_kernel_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  *   a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  *   sp points to itself (thread.sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  *   a2, a3 are unused for userspace threads,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)  *   a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)  * Note: This is a pristine frame, so we don't need any spill region on top of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)  *       childregs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)  * The fun part:  if we're keeping the same VM (i.e. cloning a thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)  * not an entire process), we're normally given a new usp, and we CANNOT share
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  * any live address register windows.  If we just copy those live frames over,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  * the two threads (parent and child) will overflow the same frames onto the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  * parent stack at different times, likely corrupting the parent stack (esp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * if the parent returns from functions that called clone() and calls new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * ones, before the child overflows its now old copies of its parent windows).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * One solution is to spill windows to the parent stack, but that's fairly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * involved.  Much simpler to just not copy those live frames across.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
		unsigned long thread_fn_arg, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	struct thread_info *ti;
#endif

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
	SPILL_SLOT(childregs, 0) = 0;

	p->thread.sp = (unsigned long)childregs;

	if (!(p->flags & PF_KTHREAD)) {
		/* Userspace thread: start from a copy of the parent's regs. */
		struct pt_regs *regs = current_pt_regs();
		/* usp_thread_fn == 0 means "inherit the parent's stack". */
		unsigned long usp = usp_thread_fn ?
			usp_thread_fn : regs->areg[1];

		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_fork, 0x1);

		/* This does not copy all the regs.
		 * In a bout of brilliance or madness,
		 * ARs beyond a0-a15 exist past the end of the struct.
		 */
		*childregs = *regs;
		childregs->areg[1] = usp;
		/* a2 = 0 — presumably the child's syscall return value;
		 * confirm against the xtensa syscall ABI. */
		childregs->areg[2] = 0;

		/* When sharing memory with the parent thread, the child
		   usually starts on a pristine stack, so we have to reset
		   windowbase, windowstart and wmask.
		   (Note that such a new thread is required to always create
		   an initial call4 frame)
		   The exception is vfork, where the new thread continues to
		   run on the parent's stack until it calls execve. This could
		   be a call8 or call12, which requires a legal stack frame
		   of the previous caller for the overflow handlers to work.
		   (Note that it's always legal to overflow live registers).
		   In this case, ensure to spill at least the stack pointer
		   of that frame. */

		if (clone_flags & CLONE_VM) {
			/* check that caller window is live and same stack */
			int len = childregs->wmask & ~0xf;
			if (regs->areg[1] == usp && len != 0) {
				int callinc = (regs->areg[0] >> 30) & 3;
				int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
				/* NOTE(review): put_user() result is ignored —
				 * the spill is best-effort; confirm intended. */
				put_user(regs->areg[caller_ars+1],
					 (unsigned __user*)(usp - 12));
			}
			/* Pristine window state: only the current frame live. */
			childregs->wmask = 1;
			childregs->windowstart = 1;
			childregs->windowbase = 0;
		} else {
			/* Separate VM: copy the live caller frames (wmask bits
			 * above the low nibble, 4 bytes per register). */
			int len = childregs->wmask & ~0xf;
			memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
			       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		}

		childregs->syscall = regs->syscall;

		if (clone_flags & CLONE_SETTLS)
			childregs->threadptr = tls;
	} else {
		/* Kernel thread: no user register context to copy. */
		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_kernel_thread, 1);

		/* pass parameters to ret_from_kernel_thread:
		 * a2 = thread_fn, a3 = thread_fn arg
		 */
		SPILL_SLOT(childregs, 3) = thread_fn_arg;
		SPILL_SLOT(childregs, 2) = usp_thread_fn;

		/* Childregs are only used when we're going to userspace
		 * in which case start_thread will set them up.
		 */
	}

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	/* New thread owns no coprocessors yet. */
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	clear_ptrace_hw_breakpoint(p);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
/*
 * These bracket the sleeping functions..
 */

/*
 * Best-effort "waiting channel": walk @p's kernel stack frames and return
 * the first PC outside the scheduler.  The walk is unsynchronized with @p,
 * so every loaded value is treated as untrusted and bounds-checked.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	/* No meaningful wchan for a missing, current, or runnable task. */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

	do {
		/* Bail out when sp leaves the task's stack or pc is bogus.
		 * NOTE(review): the lower bound adds sizeof(task_struct) —
		 * presumably a legacy of on-stack task structs; confirm. */
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Stack layout: sp-4: ra, sp-3: sp' */

		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
		sp = SPILL_SLOT(sp, 1);
	} while (count++ < 16);	/* frame cap guards against stack corruption */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }