// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

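/*
 * init_fpu() prepares the per-task FPU/extended state. If the task has
 * already used math, the live register state is flushed back to the
 * xstate area (for the current task on an FPU-equipped CPU); otherwise
 * the area is allocated and seeded with a default FPSCR.
 */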
int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Allocate the xstate area on first use of the FPU (or other
	 * extended state).
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	/*
	 * hardfpu and softfpu share the xstate union, so clearing
	 * xstate_size bytes through either pointer resets the whole area.
	 */
	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	set_stopped_child_used_math(tsk);
	return 0;
}

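/*
 * With CONFIG_SH_FPU, FPU context is switched lazily: the FPU is left
 * disabled after a context switch, and the resulting FPU-disable trap
 * lands in fpu_state_restore() below, which re-enables the FPU and
 * reloads the trapping task's saved context.
 */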
#ifdef CONFIG_SH_FPU
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);

	/* Mark the FPU state as live and feed the eager-restore heuristic. */
	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->thread.fpu_counter++;
}
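/*
 * Slow path for the FPU-disable trap: allocate and initialise the
 * xstate area on a task's very first FPU use, then hand off to
 * __fpu_state_restore() to reload the registers.
 */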
void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		/* Only reached if BUG() is compiled out. */
		return;
	}

	if (!tsk_used_math(tsk)) {
		int ret;
		/*
		 * init_fpu() does a slab allocation, which may sleep, so
		 * run it with interrupts enabled.
		 */
		local_irq_enable();
		ret = init_fpu(tsk);
		local_irq_disable();
		if (ret) {
			/*
			 * Out of memory: there is no way to recover the
			 * task, so kill it.
			 */
			force_sig(SIGKILL);
			return;
		}
	}

	/* Re-enable FPU access for the trapping context. */
	grab_fpu(regs);

	__fpu_state_restore();
}

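/*
 * BUILD_TRAP_HANDLER() generates the entry point the exception vector
 * dispatches to; TRAP_HANDLER_DECL declares the regs pointer used here.
 */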
BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;

	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */