Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *  linux/arch/arm/kernel/signal.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *  Copyright (C) 1995-2009 Russell King
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/personality.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/tracehook.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/uprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/unistd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/vfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "signal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) extern const unsigned long sigreturn_codes[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) static unsigned long signal_return_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #ifdef CONFIG_CRUNCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) static int preserve_crunch_context(struct crunch_sigframe __user *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	char kbuf[sizeof(*frame) + 8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	struct crunch_sigframe *kframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	/* the crunch context must be 64 bit aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	kframe->magic = CRUNCH_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	kframe->size = CRUNCH_STORAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	crunch_task_copy(current_thread_info(), &kframe->storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	return __copy_to_user(frame, kframe, sizeof(*frame));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) static int restore_crunch_context(char __user **auxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	struct crunch_sigframe __user *frame =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		(struct crunch_sigframe __user *)*auxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	char kbuf[sizeof(*frame) + 8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	struct crunch_sigframe *kframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	/* the crunch context must be 64 bit aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	if (__copy_from_user(kframe, frame, sizeof(*frame)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	if (kframe->magic != CRUNCH_MAGIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	    kframe->size != CRUNCH_STORAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	*auxp += CRUNCH_STORAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	crunch_task_restore(current_thread_info(), &kframe->storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #ifdef CONFIG_IWMMXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	char kbuf[sizeof(*frame) + 8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	struct iwmmxt_sigframe *kframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	/* the iWMMXt context must be 64 bit aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	if (test_thread_flag(TIF_USING_IWMMXT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		kframe->magic = IWMMXT_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 		kframe->size = IWMMXT_STORAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		 * For bug-compatibility with older kernels, some space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 		 * has to be reserved for iWMMXt even if it's not used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		 * Set the magic and size appropriately so that properly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		 * written userspace can skip it reliably:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 		*kframe = (struct iwmmxt_sigframe) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 			.magic = DUMMY_MAGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 			.size  = IWMMXT_STORAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	err = __copy_to_user(frame, kframe, sizeof(*kframe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
/*
 * Restore the iWMMXt coprocessor context from the aux block at *auxp.
 *
 * On success *auxp is advanced past the IWMMXT_STORAGE_SIZE block and 0
 * is returned; -1 is returned on a userspace fault or a malformed block.
 * For a task not using iWMMXt, a block with a foreign magic is treated
 * as "not ours": return 0 WITHOUT advancing *auxp, so a later parser in
 * restore_sigframe() can still claim it.
 */
static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence.  If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	/* Both the real and the dummy block must carry the full size. */
	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		/* A real iWMMXt user must have a real (non-dummy) block. */
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	/* Skip past the block (real or dummy) in the caller's aux walk. */
	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #ifdef CONFIG_VFP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) static int preserve_vfp_context(struct vfp_sigframe __user *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	struct vfp_sigframe kframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	memset(&kframe, 0, sizeof(kframe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	kframe.magic = VFP_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	kframe.size = VFP_STORAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	return __copy_to_user(frame, &kframe, sizeof(kframe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static int restore_vfp_context(char __user **auxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	struct vfp_sigframe frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	err = __copy_from_user(&frame, *auxp, sizeof(frame));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	*auxp += sizeof(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 
/*
 * Rebuild the CPU state in @regs from the user signal frame @sf.
 *
 * Restores the blocked signal mask from uc_sigmask, the core ARM
 * registers from uc_mcontext, and any coprocessor contexts
 * (Crunch/iWMMXt/VFP, as configured) stacked in uc_regspace.
 * Returns 0 on success; nonzero if any copy from userspace faults,
 * a coprocessor block fails validation, or the restored registers
 * are not valid user-mode register values.
 */
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	/* Reinstate the signal mask that was saved at delivery time. */
	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	/* Pull the saved register file back into pt_regs, one by one. */
	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	/* Reject register values userspace is not permitted to set. */
	err |= !valid_user_regs(regs);

	/* Coprocessor contexts follow contiguously in uc_regspace;
	 * each restore helper advances aux past the block it consumed. */
	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) asmlinkage int sys_sigreturn(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	struct sigframe __user *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	/* Always make any pending restarted system calls return -EINTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	current->restart_block.fn = do_no_restart_syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	 * Since we stacked the signal on a 64-bit boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	 * then 'sp' should be word aligned here.  If it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	 * not, then the user is trying to mess with us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	if (regs->ARM_sp & 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	frame = (struct sigframe __user *)regs->ARM_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	if (!access_ok(frame, sizeof (*frame)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	if (restore_sigframe(regs, frame))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 	return regs->ARM_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) badframe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	force_sig(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	struct rt_sigframe __user *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	/* Always make any pending restarted system calls return -EINTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	current->restart_block.fn = do_no_restart_syscall;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	 * Since we stacked the signal on a 64-bit boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	 * then 'sp' should be word aligned here.  If it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	 * not, then the user is trying to mess with us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	if (regs->ARM_sp & 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	frame = (struct rt_sigframe __user *)regs->ARM_sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	if (!access_ok(frame, sizeof (*frame)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	if (restore_sigframe(regs, &frame->sig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	if (restore_altstack(&frame->sig.uc.uc_stack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 		goto badframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	return regs->ARM_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) badframe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 	force_sig(SIGSEGV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
/*
 * Populate the user signal frame @sf from the register state @regs and
 * the blocked signal set @set.  Writes uc_mcontext, uc_sigmask and any
 * configured coprocessor contexts into uc_regspace, terminated by a
 * zero end-magic word.  Returns nonzero if any userspace write faults.
 */
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	struct sigcontext context;
	int err = 0;

	/* Snapshot the full register file plus fault bookkeeping. */
	context = (struct sigcontext) {
		.arm_r0        = regs->ARM_r0,
		.arm_r1        = regs->ARM_r1,
		.arm_r2        = regs->ARM_r2,
		.arm_r3        = regs->ARM_r3,
		.arm_r4        = regs->ARM_r4,
		.arm_r5        = regs->ARM_r5,
		.arm_r6        = regs->ARM_r6,
		.arm_r7        = regs->ARM_r7,
		.arm_r8        = regs->ARM_r8,
		.arm_r9        = regs->ARM_r9,
		.arm_r10       = regs->ARM_r10,
		.arm_fp        = regs->ARM_fp,
		.arm_ip        = regs->ARM_ip,
		.arm_sp        = regs->ARM_sp,
		.arm_lr        = regs->ARM_lr,
		.arm_pc        = regs->ARM_pc,
		.arm_cpsr      = regs->ARM_cpsr,

		.trap_no       = current->thread.trap_no,
		.error_code    = current->thread.error_code,
		.fault_address = current->thread.address,
		.oldmask       = set->sig[0],
	};

	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	/* Coprocessor contexts are stacked contiguously in uc_regspace,
	 * in the same order restore_sigframe() will parse them. */
	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	/* Zero terminator so frame parsers know where the aux list ends. */
	err |= __put_user(0, &aux->end_magic);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) static inline void __user *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	unsigned long sp = sigsp(regs->ARM_sp, ksig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	void __user *frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	 * ATPCS B01 mandates 8-byte alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	frame = (void __user *)((sp - framesize) & ~7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	 * Check that we can actually write to the signal frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	if (!access_ok(frame, framesize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		frame = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	return frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) setup_return(struct pt_regs *regs, struct ksignal *ksig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	     unsigned long __user *rc, void __user *frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 	unsigned long handler_fdpic_GOT = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	unsigned long retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 	unsigned int idx, thumb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 		     (current->personality & FDPIC_FUNCPTRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	if (fdpic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 		unsigned long __user *fdpic_func_desc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 					(unsigned long __user *)handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 		if (__get_user(handler, &fdpic_func_desc[0]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	cpsr |= PSR_ENDSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) #ifdef CONFIG_ARM_THUMB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	if (elf_hwcap & HWCAP_THUMB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 		 * The LSB of the handler determines if we're going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 		 * be using THUMB or ARM mode for this signal handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		thumb = handler & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 		 * Clear the If-Then Thumb-2 execution state.  ARM spec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 		 * requires this to be all 000s in ARM mode.  Snapdragon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 		 * without this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 		 * We must do this whenever we are running on a Thumb-2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 		 * capable CPU, which includes ARMv6T2.  However, we elect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 		 * to always do this to simplify the code; this field is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 		 * marked UNK/SBZP for older architectures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		cpsr &= ~PSR_IT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 		if (thumb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 			cpsr |= PSR_T_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 			cpsr &= ~PSR_T_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		if (fdpic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 			 * We need code to load the function descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 			 * That code follows the standard sigreturn code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 			 * (6 words), and is made of 3 + 2 words for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 			 * variant. The 4th copied word is the actual FD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 			 * address that the assembly code expects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 			idx = 6 + thumb * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 			if (ksig->ka.sa.sa_flags & SA_SIGINFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 				idx += 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 			if (__put_user(sigreturn_codes[idx],   rc  ) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 			    __put_user(sigreturn_codes[idx+1], rc+1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 			    __put_user(sigreturn_codes[idx+2], rc+2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 			    __put_user(retcode,                rc+3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 			goto rc_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 		idx = thumb << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 			idx += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 		 * Put the sigreturn code on the stack no matter which return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		 * mechanism we use in order to remain ABI compliant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 		if (__put_user(sigreturn_codes[idx],   rc) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		    __put_user(sigreturn_codes[idx+1], rc+1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) rc_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		if (cpsr & MODE32_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 			struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			 * 32-bit code can use the signal return page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 			 * except when the MPU has protected the vectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 			 * page from PL0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 			retcode = mm->context.sigpage + signal_return_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 				  (idx << 2) + thumb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			 * Ensure that the instruction cache sees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 			 * the return code written onto the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 			flush_icache_range((unsigned long)rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 					   (unsigned long)(rc + 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 			retcode = ((unsigned long)rc) + thumb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	regs->ARM_r0 = ksig->sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	regs->ARM_sp = (unsigned long)frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	regs->ARM_lr = retcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	regs->ARM_pc = handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	if (fdpic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		regs->ARM_r9 = handler_fdpic_GOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	regs->ARM_cpsr = cpsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	if (!frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	err |= setup_sigframe(frame, regs, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		err = setup_return(regs, ksig, frame->retcode, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	if (!frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	err |= __put_user(0, &frame->sig.uc.uc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	err |= __put_user(NULL, &frame->sig.uc.uc_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	err |= setup_sigframe(&frame->sig, regs, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		err = setup_return(regs, ksig, frame->sig.retcode, frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		 * For realtime signals we must also set the second and third
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		 * arguments for the signal handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 		 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		regs->ARM_r1 = (unsigned long)&frame->info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		regs->ARM_r2 = (unsigned long)&frame->sig.uc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)  * OK, we're invoking a handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)  */	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	sigset_t *oldset = sigmask_to_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	 * Perform fixup for the pre-signal frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	rseq_signal_deliver(ksig, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	 * Set up the stack frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		ret = setup_rt_frame(ksig, oldset, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 		ret = setup_frame(ksig, oldset, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	 * Check that the resulting registers are actually sane.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	ret |= !valid_user_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	signal_setup_done(ret, ksig, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)  * Note that 'init' is a special process: it doesn't get signals it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)  * want to handle. Thus you cannot kill init even with a SIGKILL even by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)  * mistake.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)  * Note that we go through the signals twice: once to check the signals that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)  * the kernel can handle, and then we build all the user-level signal handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)  * stack-frames in one go after that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static int do_signal(struct pt_regs *regs, int syscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	struct ksignal ksig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	int restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 	 * If we were from a system call, check for system call restarting...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	if (syscall) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		continue_addr = regs->ARM_pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		retval = regs->ARM_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		 * Prepare for system call restart.  We do this here so that a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		 * debugger will see the already changed PSW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		switch (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		case -ERESTART_RESTARTBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 			restart -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		case -ERESTARTNOHAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		case -ERESTARTSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		case -ERESTARTNOINTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 			restart++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 			regs->ARM_r0 = regs->ARM_ORIG_r0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 			regs->ARM_pc = restart_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	 * Get the signal to deliver.  When running under ptrace, at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	 * point the debugger may change all our registers ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	 * Depending on the signal settings we may need to revert the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 	 * decision to restart the system call.  But skip this if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	 * debugger has chosen to restart at a different PC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (get_signal(&ksig)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		/* handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 			if (retval == -ERESTARTNOHAND ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 			    retval == -ERESTART_RESTARTBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 			    || (retval == -ERESTARTSYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 				regs->ARM_r0 = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 				regs->ARM_pc = continue_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		handle_signal(&ksig, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		/* no handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		restore_saved_sigmask();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 			regs->ARM_pc = continue_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 			return restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) asmlinkage int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	 * The assembly code enters us with IRQs off, but it hasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	 * informed the tracing code of that for efficiency reasons.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	 * Update the trace code with the current status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 	trace_hardirqs_off();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 			if (unlikely(!user_mode(regs)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 			local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 			if (thread_flags & _TIF_SIGPENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 				int restart = do_signal(regs, syscall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 				if (unlikely(restart)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 					/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 					 * Restart without handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 					 * Deal with it without leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 					 * the kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 					 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 					return restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 				syscall = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 			} else if (thread_flags & _TIF_UPROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 				uprobe_notify_resume(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 				tracehook_notify_resume(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 				rseq_handle_notify_resume(NULL, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		thread_flags = current_thread_info()->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	} while (thread_flags & _TIF_WORK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) struct page *get_signal_page(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	unsigned long ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	unsigned offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	page = alloc_pages(GFP_KERNEL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	addr = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	/* Poison the entire page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 		 PAGE_SIZE / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	/* Give the signal return code some randomness */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	offset = 0x200 + (get_random_int() & 0x7fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	signal_return_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	/* Copy signal return handlers into the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	/* Flush out all instructions in this page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	ptr = (unsigned long)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	flush_icache_range(ptr, ptr + PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /* Defer to generic check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) asmlinkage void addr_limit_check_failed(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) #ifdef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	addr_limit_user_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) #ifdef CONFIG_DEBUG_RSEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) asmlinkage void do_rseq_syscall(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	rseq_syscall(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) #endif