Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
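/*
 * For example (sketch only; the actual caller is outside this excerpt):
 * code accepting a user-supplied context would compare the passed-in size
 * against this cutoff before trusting a VSX region at the end, roughly:
 *
 *	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
 *		return -EINVAL;
 *	ctx_has_vsx_region = (ctx_size >= sizeof(struct ucontext));
 */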

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

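/*
 * Note: on 64-bit kernels the live pt_regs hold 64-bit elf_greg_t64
 * values while the compat frame stores 32-bit words, so the save path
 * below truncates each register to 32 bits and the restore path widens
 * it again.
 */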
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		if (__put_user(val, &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
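/*
 * Sketch of the resulting layout (TM variants omitted; higher addresses
 * at the top, the frame grows down from the original stack pointer):
 *
 *	original sp ->	+--------------------------+
 *			| abigap (56 words)        |
 *			+--------------------------+
 *			| mctx (register values)   |
 *			+--------------------------+
 *			| sctx                     |
 *			+--------------------------+
 *			| __SIGNAL_FRAMESIZE gap   |
 *	new sp ------->	+--------------------------+
 */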

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
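/*
 * Same construction as struct sigframe above, but with siginfo plus a
 * full ucontext in place of the bare sigcontext/mcontext pair, and with
 * a gap of __SIGNAL_FRAMESIZE+16 bytes (rather than __SIGNAL_FRAMESIZE)
 * below the frame.
 */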

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

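	/*
	 * PPC_INST_ADDI is the bare opcode image of "addi rD,rA,SI" with all
	 * fields zero, so adding sigret fills in the immediate: the result is
	 * "addi r0,0,sigret", i.e. "li r0,sigret", which loads the sigreturn
	 * syscall number into r0 before "sc" traps into the kernel.
	 */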
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret,
			     unsigned long msr)
{
	WARN_ON(tm_suspend_disabled);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	set_trap_norestart(regs);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does a VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
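	/*
	 * This recovers the top MSR half stashed by save_tm_user_regs().
	 * MSR[TS] encodes the transaction state: 0b00 non-transactional,
	 * 0b01 suspended, 0b10 transactional; 0b11 is reserved, which the
	 * check below rejects.
	 */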
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_PPC64

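/*
 * A 32-bit task on a 64-bit kernel expects the compat siginfo layout, so
 * redirect the generic name to the compat conversion helper.
 */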
#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	if (MSR_TM_ACTIVE(msr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		if (__put_user((unsigned long)&rt_sf->uc_transact,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			       &rt_sf->uc.uc_link) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		    __put_user((unsigned long)tm_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			       &rt_sf->uc_transact.uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		if (__put_user(0, &rt_sf->uc.uc_link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	}
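	/*
	 * Point LR at the sigreturn trampoline (in the vDSO when available,
	 * otherwise the stub written into the frame above), so that
	 * returning from the handler enters sys_rt_sigreturn.
	 */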
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	regs->link = tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/* create a stack frame for the caller of the handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	addr = (void __user *)regs->gpr[1];
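	/*
	 * Write the old stack pointer at *newsp as the back-chain word of
	 * the new frame, so the stack remains walkable from the handler.
	 */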
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* Fill registers for signal handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	regs->gpr[1] = newsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	regs->gpr[3] = ksig->sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	regs->gpr[4] = (unsigned long) &rt_sf->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	regs->gpr[6] = (unsigned long) rt_sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	/* enter the signal handler in native-endian mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	regs->msr &= ~MSR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	regs->msr |= (MSR_KERNEL & MSR_LE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) badframe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		printk_ratelimited(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 				   "%s[%d]: bad frame in handle_rt_signal32: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 				   "%p nip %08lx lr %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				   tsk->comm, tsk->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				   addr, regs->nip, regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
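
/*
 * For reference, a userspace sketch (not part of this file) of the handler
 * side of the frame built above: gpr[3], gpr[4] and gpr[5] carry the three
 * arguments of a POSIX SA_SIGINFO handler (sig, &rt_sf->info, &rt_sf->uc).
 *
 *	#include <signal.h>
 *
 *	static void handler(int sig, siginfo_t *info, void *uc)
 *	{
 *		psiginfo(info, "caught");
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGUSR1, &sa, NULL);
 */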
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	sigset_t set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct mcontext __user *mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (get_sigset_t(&set, &ucp->uc_sigmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		u32 cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		if (__get_user(cmcp, &ucp->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		mcp = (struct mcontext __user *)(u64)cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		/* no need to check access_ok(mcp), since mcp < 4GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	if (__get_user(mcp, &ucp->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (!access_ok(mcp, sizeof(*mcp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	set_current_blocked(&set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (restore_user_regs(regs, mcp, sig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static int do_setcontext_tm(struct ucontext __user *ucp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			    struct ucontext __user *tm_ucp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			    struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	sigset_t set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	struct mcontext __user *mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	struct mcontext __user *tm_mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	u32 cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	u32 tm_cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (get_sigset_t(&set, &ucp->uc_sigmask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (__get_user(cmcp, &ucp->uc_regs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	mcp = (struct mcontext __user *)(u64)cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	/* no need to check access_ok(mcp), since mcp < 4GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	set_current_blocked(&set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (restore_tm_user_regs(regs, mcp, tm_mcp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		       struct ucontext __user *, new_ctx, int, ctx_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		       struct ucontext __user *, new_ctx, long, ctx_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	struct pt_regs *regs = current_pt_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	int ctx_has_vsx_region = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	unsigned long new_msr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	if (new_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		struct mcontext __user *mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		u32 cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * Get pointer to the real mcontext.  No need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * access_ok since we are dealing with compat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		if (__get_user(cmcp, &new_ctx->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		mcp = (struct mcontext __user *)(u64)cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	 * Check that the context is not smaller than the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	 * size (with VMX but without VSX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * Reject the new context if it sets the MSR VSX bits but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * doesn't provide the VSX state itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if ((ctx_size < sizeof(struct ucontext)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	    (new_msr & MSR_VSX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	/* Does the context have enough room to store VSX data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (ctx_size >= sizeof(struct ucontext))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		ctx_has_vsx_region = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	/* Context size is for future use. Right now, we only make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * we are passed something we understand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (ctx_size < sizeof(struct ucontext))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (old_ctx != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		struct mcontext __user *mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		 * old_ctx might not be 16-byte aligned, in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		 * case old_ctx->uc_mcontext won't be either.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		 * old_ctx->uc_pad2 field sits just before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		 * old_ctx->uc_mcontext, so rounding &old_ctx->uc_mcontext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		 * down to a 16-byte boundary stays within the ucontext.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		mctx = (struct mcontext __user *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		if (!access_ok(old_ctx, ctx_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	if (new_ctx == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (!access_ok(new_ctx, ctx_size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	 * If we get a fault copying the context into the kernel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	 * image of the user's registers, we can't just return -EFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	 * because the user's registers will be corrupted.  For instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	 * the NIP value may have been updated but not some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 * other registers.  Given that we have done the access_ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	 * and successfully read the first and last bytes of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	 * above, this should only happen in an out-of-memory situation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	 * or if another thread unmaps the region containing the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	 * We kill the task with a SIGSEGV in this situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	if (do_setcontext(new_ctx, regs, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		do_exit(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	set_thread_flag(TIF_RESTOREALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
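
/*
 * Illustrative only: userspace normally reaches this syscall through the
 * <ucontext.h> API. A minimal sketch, assuming the libc routes
 * swapcontext() here on 32-bit PPC (the exact libc plumbing may differ):
 *
 *	#include <ucontext.h>
 *
 *	static ucontext_t main_uc, co_uc;
 *	static char co_stack[64 * 1024];
 *
 *	static void co(void)
 *	{
 *		swapcontext(&co_uc, &main_uc);
 *	}
 *
 *	getcontext(&co_uc);
 *	co_uc.uc_stack.ss_sp = co_stack;
 *	co_uc.uc_stack.ss_size = sizeof(co_stack);
 *	co_uc.uc_link = &main_uc;
 *	makecontext(&co_uc, co, 0);
 *	swapcontext(&main_uc, &co_uc);
 */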
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) SYSCALL_DEFINE0(rt_sigreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct rt_sigframe __user *rt_sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	struct pt_regs *regs = current_pt_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	int tm_restore = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	struct ucontext __user *uc_transact;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	unsigned long msr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/* Always make any pending restarted system calls return -EINTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	current->restart_block.fn = do_no_restart_syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	rt_sf = (struct rt_sigframe __user *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (!access_ok(rt_sf, sizeof(*rt_sf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	 * If there is a transactional state then throw it away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	 * The purpose of a sigreturn is to destroy all traces of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * signal frame; this includes any transactional state created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 * within it. We only check for suspended, as we can never be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * transactionally active in the kernel; if we somehow are, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 * is nothing better to do than take the TM Bad Thing later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 * The failure cause is not important, as there will never be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * recheckpoint, so it's not user visible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	if (MSR_TM_SUSPENDED(mfmsr()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		tm_reclaim_current(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (__get_user(tmp, &rt_sf->uc.uc_link))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (uc_transact) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		u32 cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		struct mcontext __user *mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (__get_user(cmcp, &uc_transact->uc_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		mcp = (struct mcontext __user *)(u64)cmcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		/* The top 32 bits of the MSR are stashed in the transactional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 * ucontext. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		if (MSR_TM_ACTIVE(msr_hi << 32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			/* Trying to start TM on non TM system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			if (!cpu_has_feature(CPU_FTR_TM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			/* We only recheckpoint on return if we're in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			 * a transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			tm_restore = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 				goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (!tm_restore) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		 * Clear the TS bits in regs->msr because the ucontext
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		 * MSR TS is not set and recheckpoint was not called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		 * This avoids hitting a TM Bad Thing at RFID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		regs->msr &= ~MSR_TS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	/* Fall through, for non-TM restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (!tm_restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		if (do_setcontext(&rt_sf->uc, regs, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	 * It's not clear whether or why it is desirable to save the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	 * sigaltstack setting on signal delivery and restore it on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	 * signal return.  But other architectures do this and we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	 * always done it up until now so it is probably better not to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	 * change it.  -- paulus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (restore_altstack(&rt_sf->uc.uc_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	set_thread_flag(TIF_RESTOREALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		printk_ratelimited(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				   "%s[%d]: bad frame in sys_rt_sigreturn: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				   "%p nip %08lx lr %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				   current->comm, current->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				   rt_sf, regs->nip, regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	force_sig(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
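
/*
 * For reference, the user stack as sys_rt_sigreturn expects it, i.e. as
 * handle_rt_signal32() left it (lowest address first):
 *
 *	gpr[1]                               dummy frame / back-chain word
 *	gpr[1] + __SIGNAL_FRAMESIZE          16 bytes of padding
 *	gpr[1] + __SIGNAL_FRAMESIZE + 16     struct rt_sigframe (rt_sf)
 *
 * which is why rt_sf is recovered from gpr[1] + __SIGNAL_FRAMESIZE + 16
 * above.
 */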
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #ifdef CONFIG_PPC32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			 int, ndbg, struct sig_dbg_op __user *, dbg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct pt_regs *regs = current_pt_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	struct sig_dbg_op op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	unsigned long new_msr = regs->msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	for (i = 0; i < ndbg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (copy_from_user(&op, dbg + i, sizeof(op)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		switch (op.dbg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		case SIG_DBG_SINGLE_STEPPING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			if (op.dbg_value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				new_msr |= MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 				new_dbcr0 &= ~DBCR0_IC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 						current->thread.debug.dbcr1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 					new_msr &= ~MSR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 					new_dbcr0 &= ~DBCR0_IDM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			if (op.dbg_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 				new_msr |= MSR_SE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 				new_msr &= ~MSR_SE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		case SIG_DBG_BRANCH_TRACING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			if (op.dbg_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 				new_msr |= MSR_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				new_msr &= ~MSR_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	/* We wait until here to actually install the values in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	   registers so if we fail in the above loop, it will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	   affect the contents of these registers.  After this point,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	   failure is a problem, anyway, and it's very unlikely unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	   the user is really doing something wrong. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	regs->msr = new_msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) #ifdef CONFIG_PPC_ADV_DEBUG_REGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	current->thread.debug.dbcr0 = new_dbcr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (!access_ok(ctx, sizeof(*ctx)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	 * If we get a fault copying the context into the kernel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	 * image of the user's registers, we can't just return -EFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	 * because the user's registers will be corrupted.  For instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	 * the NIP value may have been updated but not some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	 * other registers.  Given that we have done the access_ok
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	 * and successfully read the first and last bytes of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	 * above, this should only happen in an out-of-memory situation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * or if another thread unmaps the region containing the context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * We kill the task with a SIGSEGV in this situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	if (do_setcontext(ctx, regs, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		if (show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 					   "sys_debug_setcontext: %p nip %08lx "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 					   "lr %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					   current->comm, current->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 					   ctx, regs->nip, regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		force_sig(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	 * It's not clear whether or why it is desirable to save the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	 * sigaltstack setting on signal delivery and restore it on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	 * signal return.  But other architectures do this and we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	 * always done it up until now so it is probably better not to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	 * change it.  -- paulus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	restore_altstack(&ctx->uc_stack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	set_thread_flag(TIF_RESTOREALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) #endif
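
/*
 * Illustrative only: a hypothetical userspace invocation that turns on
 * single-stepping through this syscall. The __NR_debug_setcontext macro
 * name is an assumption here; the struct fields match the code above.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct sig_dbg_op op = {
 *		.dbg_type  = SIG_DBG_SINGLE_STEPPING,
 *		.dbg_value = 1,
 *	};
 *	syscall(__NR_debug_setcontext, uc, 1, &op);	// uc: ucontext_t *
 */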
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * OK, we're invoking a handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		struct task_struct *tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	struct sigcontext __user *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct sigframe __user *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct mcontext __user *tm_mctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	unsigned long newsp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	int sigret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	unsigned long tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct pt_regs *regs = tsk->thread.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	/* Save the thread's msr before get_tm_stackpointer() changes it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	unsigned long msr = regs->msr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	BUG_ON(tsk != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	/* Set up Signal Frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (unlikely(frame == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	sc = (struct sigcontext __user *) &frame->sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) #if _NSIG != 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #error "Please adjust handle_signal()"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	    || __put_user(oldset->sig[0], &sc->oldmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	    || __put_user(oldset->sig[1], &sc->_unused[3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	    || __put_user(ksig->sig, &sc->signal))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		sigret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		sigret = __NR_sigreturn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		tramp = (unsigned long) frame->mctx.tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	tm_mctx = &frame->mctx_transact;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (MSR_TM_ACTIVE(msr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 				      sigret, msr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	regs->link = tramp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/* create a stack frame for the caller of the handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	regs->gpr[1] = newsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	regs->gpr[3] = ksig->sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	regs->gpr[4] = (unsigned long) sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	/* enter the signal handler in big-endian mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	regs->msr &= ~MSR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) badframe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		printk_ratelimited(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				   "%s[%d]: bad frame in handle_signal32: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 				   "%p nip %08lx lr %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 				   tsk->comm, tsk->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				   frame, regs->nip, regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
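
/*
 * For reference: with the old-style frame set up above, the userspace
 * handler effectively has the historical PPC prototype
 *
 *	void handler(int sig, struct sigcontext *sc);
 *
 * since gpr[3] carries the signal number and gpr[4] points at the
 * sigcontext on the stack.
 */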
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  * Do a signal return; undo the signal stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) COMPAT_SYSCALL_DEFINE0(sigreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) SYSCALL_DEFINE0(sigreturn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct pt_regs *regs = current_pt_regs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct sigframe __user *sf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	struct sigcontext __user *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	struct sigcontext sigctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	struct mcontext __user *sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	void __user *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	sigset_t set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	struct mcontext __user *mcp, *tm_mcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	unsigned long msr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	/* Always make any pending restarted system calls return -EINTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	current->restart_block.fn = do_no_restart_syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	sc = &sf->sctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	addr = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * unused part of the signal stack frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	set.sig[0] = sigctx.oldmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	set.sig[1] = sigctx._unused[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	set_current_blocked(&set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	mcp = (struct mcontext __user *)&sf->mctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (MSR_TM_ACTIVE(msr_hi << 32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		if (!cpu_has_feature(CPU_FTR_TM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		if (restore_tm_user_regs(regs, mcp, tm_mcp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		addr = sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		if (!access_ok(sr, sizeof(*sr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		    || restore_user_regs(regs, sr, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	set_thread_flag(TIF_RESTOREALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) badframe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		printk_ratelimited(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 				   "%s[%d]: bad frame in sys_sigreturn: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				   "%p nip %08lx lr %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				   current->comm, current->pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 				   addr, regs->nip, regs->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	force_sig(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }