Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6) #include <linux/sched/task_stack.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8) #include <linux/stackprotector.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9) #include <asm/fpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) struct kmem_cache *task_xstate_cachep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) unsigned int xstate_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #ifdef CONFIG_STACKPROTECTOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) unsigned long __stack_chk_guard __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) EXPORT_SYMBOL(__stack_chk_guard);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)  * this gets called so that we can store lazy state into memory and copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)  * current task into the new thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) 	unlazy_fpu(src, task_pt_regs(src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) 	*dst = *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) 	if (src->thread.xstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) 		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) 						      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) 		if (!dst->thread.xstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) 		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) void free_thread_xstate(struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) 	if (tsk->thread.xstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) 		tsk->thread.xstate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) 
/*
 * Arch hook run when a task_struct is finally freed: drop the task's
 * lazily allocated FPU/xstate buffer along with it.
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) void arch_task_cache_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) 	if (!xstate_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) 	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) 					       __alignof__(union thread_xstate),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) 					       SLAB_PANIC, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #ifdef CONFIG_SH_FPU_EMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) # define HAVE_SOFTFP	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) # define HAVE_SOFTFP	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) void init_thread_xstate(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	if (boot_cpu_data.flags & CPU_HAS_FPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 		xstate_size = sizeof(struct sh_fpu_hard_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) 	else if (HAVE_SOFTFP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) 		xstate_size = sizeof(struct sh_fpu_soft_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) 		xstate_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) }