Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * FPU signal frame handling routines.
 */

#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/pagemap.h>

#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/sigframe.h>
#include <asm/trace/fpu.h>

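/*
 * Boot-time templates for the software-reserved bytes of the fxsave area.
 * They are filled in by fpu__init_prepare_fx_sw_frame() below and copied
 * verbatim into every signal frame; fx_sw_reserved_ia32 additionally
 * accounts for the fsave header that precedes 32-bit fxstate frames.
 */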
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
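/*
 * Rough sketch of the xsave sigframe layout validated below (all sizes
 * other than the 512-byte legacy area depend on the CPU):
 *
 *   fpstate -> +----------------------------+
 *              | legacy fxsave area         | 512 bytes, ends with the
 *              |   ... sw_reserved[]        | _fpx_sw_bytes (magic1, sizes)
 *              +----------------------------+
 *              | xstate header              |
 *              +----------------------------+
 *              | extended state components  |
 *              +----------------------------+
 *              | FP_XSTATE_MAGIC2           | at fpstate + xstate_size
 *              +----------------------------+
 */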
static inline int check_for_xstate(struct fxregs_state __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct fxregs_state) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > fpu_user_xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of the second magic word at the end of the
	 * memory layout. This detects the case where the user just copied
	 * the legacy fpstate layout without copying the extended state
	 * information in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		fpregs_lock();
		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
			copy_fxregs_to_kernel(&tsk->thread.fpu);
		fpregs_unlock();

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct fregs_state __user *fp = buf;
		u32 swd;

		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Set up the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *)(buf + fpu_user_xstate_size));

	/*
	 * Read the xfeatures value which we copied (directly from the CPU or
	 * from the state in the task struct) to the user buffer.
	 */
	err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the bit
	 * vector while saving the state to the user context. This lets us
	 * capture any changes (during sigreturn) to the FP/SSE bits made by
	 * legacy applications which don't touch xfeatures in the xsave
	 * header.
	 *
	 * xsave-aware apps can change the xfeatures in the xsave header as
	 * well as change any contents in the memory layout. xrstor as part
	 * of sigreturn will capture all of those changes.
	 */
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);

	return err;
}

static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
	int err;

	if (use_xsave())
		err = copy_xregs_to_user(buf);
	else if (use_fxsr())
		err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
	else
		err = copy_fregs_to_user((struct fregs_state __user *) buf);

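	/*
	 * A fault can leave a partially-written register image in the user
	 * buffer; clear the whole area so userspace never sees torn state,
	 * and report -EFAULT if even the clearing fails.
	 */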
	if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the FPU and extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 * state is copied.
 * 'buf' points to 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and the 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * Try to save it directly to the user frame with the page fault handler
 * disabled. If this fails, take the slow path: first save the FPU state to
 * the task's fpu->state, then copy it to the user frame pointed to by the
 * aligned pointer 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);
	int ret;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		struct user_i387_ia32_struct fp;

		fpregs_soft_get(current, NULL, (struct membuf){.p = &fp,
						.left = sizeof(fp)});
		return copy_to_user(buf, &fp, sizeof(fp)) ? -EFAULT : 0;
	}

	if (!access_ok(buf, size))
		return -EACCES;
retry:
	/*
	 * Load the FPU registers if they are not valid for the current task.
	 * With a valid FPU state we can attempt to save the state directly to
	 * userland's stack frame which will likely succeed. If it does not,
	 * resolve the fault in the user memory and try again.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		__fpregs_load_activate();

	pagefault_disable();
	ret = copy_fpregs_to_sigframe(buf_fx);
	pagefault_enable();
	fpregs_unlock();

	if (ret) {
		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
			goto retry;
		return -EFAULT;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}
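
/*
 * Roughly, the life cycle of the state saved above, as driven by the x86
 * signal code (e.g. arch/x86/kernel/signal.c):
 *
 *	fpu__alloc_mathframe()      carve aligned space on the user stack
 *	copy_fpstate_to_sigframe()  save the registers at signal delivery
 *	fpu__restore_sig()          reload them during sigreturn
 */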

static inline void
sanitize_restored_user_xstate(union fpregs_state *state,
			      struct user_i387_ia32_struct *ia32_env,
			      u64 user_xfeatures, int fx_only)
{
	struct xregs_state *xsave = &state->xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/*
		 * Clear all feature bits which are not set in
		 * user_xfeatures and clear all extended features
		 * for fx_only mode.
		 */
		u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;

		/*
		 * Supervisor state has to be preserved. The sigframe
		 * restore can only modify user features, i.e. @mask
		 * cannot contain them.
		 */
		header->xfeatures &= mask | xfeatures_mask_supervisor();
	}

	if (use_fxsr()) {
		/*
		 * The mxcsr reserved bits must be masked to zero for security
		 * reasons: FXRSTOR would raise #GP if any of them were set
		 * from the untrusted user buffer.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		if (ia32_env)
			convert_to_fxsr(&state->fxsave, ia32_env);
	}
}

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE
 * state. Any user feature that is enabled on the CPU but absent from the
 * sigframe is reset to its init state, hence the "zeroing" in the name.
 */
static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	u64 init_bv;
	int r;

	if (use_xsave()) {
		if (fx_only) {
			init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;

			r = copy_user_to_fxregs(buf);
			if (!r)
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return r;
		} else {
			init_bv = xfeatures_mask_user() & ~xbv;

			r = copy_user_to_xregs(buf, xbv);
			if (!r && unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return r;
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else {
		return copy_user_to_fregs(buf);
	}
}

static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct user_i387_ia32_struct *envp = NULL;
	int state_size = fpu_kernel_xstate_size;
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	struct user_i387_ia32_struct env;
	u64 user_xfeatures = 0;
	int fx_only = 0;
	int ret = 0;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu__clear_user_states(fpu);
		return 0;
	}

	if (!access_ok(buf, size)) {
		ret = -EACCES;
		goto out;
	}

	if (!static_cpu_has(X86_FEATURE_FPU)) {
		ret = fpregs_soft_set(current, NULL, 0,
				      sizeof(struct user_i387_ia32_struct),
				      NULL, buf);
		goto out;
	}

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;

		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct fxregs_state);
			fx_only = 1;
			trace_x86_fpu_xstate_check_failed(fpu);
		} else {
			state_size = fx_sw_user.xstate_size;
			user_xfeatures = fx_sw_user.xfeatures;
		}
	}

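	/*
	 * XRSTOR requires its source buffer to be 64-byte aligned, so a
	 * misaligned frame can only be restored via the legacy fxsave
	 * format.
	 */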
	if ((unsigned long)buf_fx % 64)
		fx_only = 1;

	if (!ia32_fxstate) {
		/*
		 * Attempt to restore the FPU registers directly from user
		 * memory. For that to succeed, the user access cannot cause
		 * page faults. If it does, fall back to the slow path below,
		 * going through the kernel buffer with the page fault handler
		 * enabled.
		 */
		fpregs_lock();
		pagefault_disable();
		ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
		pagefault_enable();
		if (!ret) {
			/*
			 * Restore supervisor states: the previous context
			 * switch etc. has done XSAVES and saved the
			 * supervisor states in the kernel buffer from which
			 * they can be restored now.
			 *
			 * We cannot do a single XRSTORS here - which would
			 * be nice - because the rest of the FPU registers are
			 * being restored from a user buffer directly. The
			 * single XRSTORS happens below, when the user buffer
			 * has been copied to the kernel one.
			 */
			if (test_thread_flag(TIF_NEED_FPU_LOAD) &&
			    xfeatures_mask_supervisor())
				copy_kernel_to_xregs(&fpu->state.xsave,
						     xfeatures_mask_supervisor());
			fpregs_mark_activate();
			fpregs_unlock();
			return 0;
		}

		/*
		 * The above did an FPU restore operation, restricted to
		 * the user portion of the registers, and failed, but the
		 * microcode might have modified the FPU registers
		 * nevertheless.
		 *
		 * If the FPU registers do not belong to current, then
		 * invalidate the FPU register state, otherwise the task
		 * might preempt current and return to user space with
		 * corrupted FPU registers.
		 *
		 * If current owns the FPU registers, no further action is
		 * required. The fixup below will handle it correctly.
		 */
		if (test_thread_flag(TIF_NEED_FPU_LOAD))
			__cpu_invalidate_fpregs_state();

		fpregs_unlock();
	} else {
		/*
		 * For 32-bit frames with fxstate, copy the fxstate so it can
		 * be reconstructed later.
		 */
		ret = __copy_from_user(&env, buf, sizeof(env));
		if (ret)
			goto out;
		envp = &env;
	}

	/*
	 * Setting TIF_NEED_FPU_LOAD ensures that our xstate is not modified
	 * on context switch and that the xstate is considered to be loaded
	 * again on return to userland (overriding last_cpu avoids the
	 * optimisation).
	 */
	fpregs_lock();

	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
		/*
		 * Supervisor states are not modified by user space input.
		 * Save the current supervisor states first and invalidate the
		 * FPU regs.
		 */
		if (xfeatures_mask_supervisor())
			copy_supervisor_to_kernel(&fpu->state.xsave);
		set_thread_flag(TIF_NEED_FPU_LOAD);
	}
	__fpu_invalidate_fpregs_state(fpu);
	fpregs_unlock();

	if (use_xsave() && !fx_only) {
		u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;

		ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
		if (ret)
			goto out;

		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
					      fx_only);

		fpregs_lock();
		if (unlikely(init_bv))
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);

		/*
		 * Restore previously saved supervisor xstates along with
		 * copied-in user xstates.
		 */
		ret = copy_kernel_to_xregs_err(&fpu->state.xsave,
					       user_xfeatures | xfeatures_mask_supervisor());

	} else if (use_fxsr()) {
		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
					      fx_only);

		fpregs_lock();
		if (use_xsave()) {
			u64 init_bv;

			init_bv = xfeatures_mask_user() & ~XFEATURE_MASK_FPSSE;
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
		}

		ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
	} else {
		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
		if (ret)
			goto out;

		fpregs_lock();
		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
	}
	if (!ret)
		fpregs_mark_activate();
	else
		fpregs_deactivate(fpu);
	fpregs_unlock();

out:
	if (ret)
		fpu__clear_user_states(fpu);
	return ret;
}

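/*
 * Size of the user-visible [f|fx|x]save area. For xsave frames this
 * includes the trailing FP_XSTATE_MAGIC2 word that check_for_xstate()
 * looks for at restore time.
 */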
static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
			fpu_user_xstate_size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct fregs_state);
		size += sizeof(struct fregs_state);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}

unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);
	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct fregs_state);
		sp -= sizeof(struct fregs_state);
	}

	*size = frame_size;

	return sp;
}
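
/*
 * Resulting stack layout for a 32-bit frame with fxstate, sketched from
 * the arithmetic above (the stack grows towards lower addresses):
 *
 *	sp (returned) -> fsave header (struct fregs_state)
 *	*buf_fx       -> 64-byte aligned [f]xsave area
 *	                 ... trailing FP_XSTATE_MAGIC2 (xsave only)
 */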

/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This is saved whenever the FP and extended state context is saved on
 * the user stack during signal delivery.
 */
void fpu__init_prepare_fx_sw_frame(void)
{
	int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xfeatures = xfeatures_mask_user();
	fx_sw_reserved.xstate_size = fpu_user_xstate_size;

	if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
	    IS_ENABLED(CONFIG_X86_32)) {
		int fsave_header_size = sizeof(struct fregs_state);

		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
	}
}