Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Based on arch/arm/kernel/signal.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 1995-2009 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 2012 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/personality.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/freezer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/stddef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/sizes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/tracehook.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/ratelimit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/daifflags.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/debug-monitors.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/ucontext.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <asm/fpsimd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <asm/syscall.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <asm/signal32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include <asm/vdso.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * Do a signal return; undo the signal stack. These are aligned to 128-bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  */
struct rt_sigframe {
	struct siginfo info;	/* siginfo delivered to the handler */
	struct ucontext uc;	/* saved context restored by rt_sigreturn */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 
/* AAPCS64 stack frame record pushed below the sigframe for unwinders. */
struct frame_record {
	u64 fp;	/* saved frame pointer (x29) */
	u64 lr;	/* saved link register (x30) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
/*
 * Tracks the layout of the signal frame being built on the user stack:
 * where the frame lives, how much space has been handed out so far, and
 * the offsets of each optional context record within it.
 */
struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;	/* base of the user frame */
	struct frame_record __user *next_frame;	/* frame record below it */

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	/* Offsets of each record from sigframe; 0 means "not present": */
	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long extra_offset;
	unsigned long end_offset;	/* null terminator record */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
/* Record sizes, each rounded up to preserve 16-byte alignment: */
#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) static void init_user_layout(struct rt_sigframe_user_layout *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	const size_t reserved_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		sizeof(user->sigframe->uc.uc_mcontext.__reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	memset(user, 0, sizeof(*user));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	user->limit = user->size + reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	user->limit -= TERMINATOR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	user->limit -= EXTRA_CONTEXT_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	/* Reserve space for extension and terminator ^ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89)  * Sanity limit on the approximate maximum size of signal frame we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90)  * try to generate.  Stack alignment padding and the frame record are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * not taken into account.  This limit is not a guarantee and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * NOT ABI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) #define SIGFRAME_MAXSZ SZ_64K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
/*
 * Reserve @size bytes (padded to 16-byte alignment) in the signal frame
 * layout and store the record's offset from the frame base in *offset.
 *
 * If the record does not fit in the remaining __reserved[] space and
 * @extend is true, an extra_context record is allocated first (at most
 * once — guarded by !user->extra_offset) and the limit is raised to
 * SIGFRAME_MAXSZ so the record can be placed in the extra space.
 * Returns 0 on success or -ENOMEM if the record still does not fit.
 */
static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		/*
		 * Temporarily give back the EXTRA_CONTEXT_SIZE headroom
		 * reserved by init_user_layout() so the extra_context
		 * record itself can be allocated (with extend == false
		 * to prevent recursion):
		 */
		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.  May grow the frame via an
 * extra_context record (extend == true); returns 0 or -ENOMEM.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/*
	 * Un-reserve the space reserved for the terminator by
	 * init_user_layout() so the terminator itself can be placed:
	 */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation by freezing the limit at the size: */
	user->limit = user->size;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) static void __user *apply_user_offset(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	struct rt_sigframe_user_layout const *user, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	char __user *base = (char __user *)user->sigframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	return base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
/*
 * Write the current task's FPSIMD state (vregs, fpsr, fpcr) plus the
 * fpsimd_context magic/size header into the user sigframe record @ctx.
 * err accumulates across the __put_user_error() calls, so one check at
 * the end suffices.  Returns 0 or -EFAULT.
 */
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 
/*
 * Read an fpsimd_context record back out of the user sigframe and load
 * it into the hardware/task state.  Returns 0, -EFAULT on a faulting
 * user access, or -EINVAL if the magic/size header does not match.
 */
static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	/*
	 * An FPSIMD-only frame means any SVE state is discarded; note
	 * that this is done regardless of err above.
	 */
	clear_thread_flag(TIF_SVE);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
/*
 * Pointers into the user sigframe, located by parse_user_sigframe();
 * NULL means the corresponding record was not present.
 */
struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) #ifdef CONFIG_ARM64_SVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
/*
 * Write an sve_context record (header, vector length, zeroed reserved
 * words and — when the task is in SVE mode — the SVE register data) to
 * the user sigframe record @ctx.  Returns 0 or -EFAULT.
 */
static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = current->thread.sve_vl;
	unsigned int vq = 0;

	/* vq stays 0 (header-only record) unless the task is in SVE mode */
	if (test_thread_flag(TIF_SVE))
		vq = sve_vq_from_vl(vl);

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/*
 * Restore SVE and FPSIMD state from the records located in *user.  A
 * header-only sve_context (size <= sizeof(struct sve_context)) means
 * the frame carries no SVE register data, so only FPSIMD is restored.
 * Returns 0, -EFAULT on faulting user accesses, or -EINVAL on a
 * mismatched vector length or undersized record.
 */
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	/* The signal frame cannot change the task's vector length: */
	if (sve.vl != current->thread.sve_vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current);
	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SVE);

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) #else /* ! CONFIG_ARM64_SVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /* Turn any non-optimised out attempts to use these into a link error: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) extern int preserve_sve_context(void __user *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) extern int restore_sve_fpsimd_context(struct user_ctxs *user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) #endif /* ! CONFIG_ARM64_SVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
/*
 * Walk the record list in the __reserved[] area of the user sigframe
 * @sf (and, if an EXTRA_MAGIC record redirects us, the extra data it
 * points to), filling *user with the locations of the fpsimd and sve
 * records found.  All sizes and pointers come from untrusted user
 * memory, so every step is bounds- and alignment-checked.  Returns 0
 * on success, -EINVAL on a malformed frame, or a fault error from
 * reading user memory.
 */
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		/* There must be room for at least a record header: */
		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		/* The claimed record size must fit in the remaining area: */
		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			/* magic 0 with size 0 terminates the record list */
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			/* Reject duplicate fpsimd records: */
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve())
				goto invalid;

			/* Reject duplicate sve records: */
			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			/* The extra data must directly follow the record: */
			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		/* A non-terminator record must at least cover its header: */
		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 
/*
 * Restore the saved user context from an rt_sigframe during sigreturn.
 *
 * Repopulates the blocked signal mask, the general purpose registers,
 * sp, pc and pstate from @sf, then validates and restores the optional
 * records (FPSIMD/SVE) found in the frame.
 *
 * Returns 0 on success, -EFAULT if a user access faults, or -EINVAL if
 * the frame contents are malformed or not permitted for this task.
 */
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	/* Restore the signal mask first; only applied if the copy succeeded */
	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	/*
	 * x0..x30, then sp, pc and pstate.  __get_user_error() accumulates
	 * any fault into err rather than aborting, so all reads are issued
	 * unconditionally and err is checked afterwards.
	 */
	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	/* Refuse to resume with a pstate userspace is not allowed to hold */
	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		/* An fpsimd record is mandatory whenever FPSIMD exists */
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve) {
			/* An SVE record on a non-SVE system is invalid */
			if (!system_supports_sve())
				return -EINVAL;

			err = restore_sve_fpsimd_context(&user);
		} else {
			err = restore_fpsimd_context(user.fpsimd);
		}
	}

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
/*
 * sigreturn syscall: unwind the signal frame that setup_rt_frame() built
 * on the user stack and resume the interrupted context.  Any corruption
 * of the frame results in a forced SIGSEGV rather than an error return.
 */
SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal frame on a 16-byte (128-bit)
	 * boundary, 'sp' must still be 16-byte aligned here; anything
	 * else indicates a corrupted frame.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	/* Resume with the restored x0 as the apparent syscall return value */
	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 *	this task; otherwise, generates a layout for the current state
 *	of the task.
 *
 * The sigframe_alloc() calls below define the order of records within
 * __reserved[]; sigframe_alloc_end() then reserves room for the
 * terminator (and any extra_context spill area).  Returns 0 on success
 * or a negative error code if the records cannot fit.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve()) {
		unsigned int vq = 0;

		/*
		 * vq stays 0 (header-only record) unless SVE state is live
		 * for this task or the maximal layout was requested.
		 */
		if (add_all || test_thread_flag(TIF_SVE)) {
			int vl = sve_max_vl;

			if (!add_all)
				vl = current->thread.sve_vl;

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
/*
 * Populate the signal frame laid out by setup_sigframe_layout().
 *
 * Writes the interrupted context (registers, signal mask, fault info,
 * FPSIMD/SVE state) plus any extra_context record and the terminators
 * into the user-space frame described by @user.  Returns 0 on success
 * or a nonzero value if any user access faulted.
 */
static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	/* x0..x30, then sp, pc and pstate; err accumulates any fault */
	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state, if present */
	if (system_supports_sve() && err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/*
	 * If the records overflowed __reserved[], emit an extra_context
	 * record pointing at the spill area beyond the base frame,
	 * followed immediately by a terminator inside __reserved[].
	 */
	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
/*
 * Compute the layout and user stack location of the signal frame.
 *
 * Picks the stack to use (honouring SA_ONSTACK via sigsp()), reserves a
 * 16-byte-aligned frame record for the unwinder followed by the
 * rt_sigframe itself, and verifies the whole region is writable.
 * Returns 0 on success, or a negative error code if the layout cannot
 * be built or the frame is not accessible.
 */
static int get_sigframe(struct rt_sigframe_user_layout *user,
			 struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	/* Frame record (fp, lr) sits above the sigframe, 16-byte aligned */
	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 
/*
 * Point the user register state at the signal handler.
 *
 * Sets up x0 (signal number), sp, fp (chained to the frame record so
 * unwinders can walk through the signal frame), pc (the handler) and
 * lr (the sigreturn trampoline), and adjusts pstate for BTI and MTE.
 */
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	/* fp -> frame record on the signal stack, for unwinding */
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* lr: user-supplied restorer, or the vDSO sigreturn trampoline */
	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 
/*
 * Build the rt signal frame on the user stack and redirect execution to
 * the handler.  Returns 0 on success; nonzero means the frame could not
 * be written and the caller will force a SIGSEGV instead.
 */
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	/* Flush live FPSIMD/SVE state to thread storage before saving it */
	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		/* SA_SIGINFO handlers also take x1 = &info, x2 = &ucontext */
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static void setup_restart_syscall(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	if (is_compat_task())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 		compat_setup_restart_syscall(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 		regs->regs[8] = __NR_restart_syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 
/*
 * OK, we're invoking a handler: build the signal frame (native or
 * compat flavour), sanity-check the resulting registers, and complete
 * delivery.  On any failure, signal_setup_done() forces a SIGSEGV.
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		/* The svc/swi instruction to re-execute sits just before pc */
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			/* Re-arm the syscall: restore x0 and back pc up */
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	/* No handler run: put back any signal mask saved for sigsuspend etc. */
	restore_saved_sigmask();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) asmlinkage void do_notify_resume(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 				 unsigned long thread_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 		/* Check valid user FS if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 		addr_limit_user_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 		if (thread_flags & _TIF_NEED_RESCHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 			/* Unmask Debug and SError for the next task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 			local_daif_restore(DAIF_PROCCTX_NOIRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 			local_daif_restore(DAIF_PROCCTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 			if (thread_flags & _TIF_UPROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 				uprobe_notify_resume(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 					       (void __user *)NULL, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 			if (thread_flags & _TIF_SIGPENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 				do_signal(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 			if (thread_flags & _TIF_NOTIFY_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 				tracehook_notify_resume(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 				rseq_handle_notify_resume(NULL, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 			if (thread_flags & _TIF_FOREIGN_FPSTATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) 				fpsimd_restore_current_state();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 		local_daif_mask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 		thread_flags = READ_ONCE(current_thread_info()->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 	} while (thread_flags & _TIF_WORK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) unsigned long __ro_after_init signal_minsigstksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)  * Determine the stack space required for guaranteed signal devliery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)  * This function is used to populate AT_MINSIGSTKSZ at process startup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)  * cpufeatures setup is assumed to be complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) void __init minsigstksz_setup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) 	struct rt_sigframe_user_layout user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) 	init_user_layout(&user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) 	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) 	 * be big enough, but it's our best guess:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) 	if (WARN_ON(setup_sigframe_layout(&user, true)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) 	signal_minsigstksz = sigframe_size(&user) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) 		round_up(sizeof(struct frame_record), 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) 		16; /* max alignment padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }