// SPDX-License-Identifier: GPL-2.0
/*  linux/arch/sparc/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/smp.h>
#include <linux/binfmts.h>	/* do_coredump */
#include <linux/bitops.h>
#include <linux/tracehook.h>

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>	/* flush_sig_insns */
#include <asm/switch_to.h>

#include "sigutil.h"
#include "kernel.h"

extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

struct signal_frame {
        struct sparc_stackf ss;
        __siginfo32_t info;
        __siginfo_fpu_t __user *fpu_save;
        unsigned long insns[2] __attribute__ ((aligned (8)));
        unsigned int extramask[_NSIG_WORDS - 1];
        unsigned int extra_size; /* Should be 0 */
        __siginfo_rwin_t __user *rwin_save;
} __attribute__((aligned(8)));

struct rt_signal_frame {
        struct sparc_stackf ss;
        siginfo_t info;
        struct pt_regs regs;
        sigset_t mask;
        __siginfo_fpu_t __user *fpu_save;
        unsigned int insns[2];
        stack_t stack;
        unsigned int extra_size; /* Should be 0 */
        __siginfo_rwin_t __user *rwin_save;
} __attribute__((aligned(8)));

/* Align macros */
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))

/* Checks if the fp is valid. We always build signal frames which are
 * 16-byte aligned, therefore we can always enforce that the restore
 * frame has that property as well.
 */
static inline bool invalid_frame_pointer(void __user *fp, int fplen)
{
        if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen))
                return true;

        return false;
}

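/* Handler for the (non-RT) sigreturn system call: unwind the frame that
 * setup_frame() built on the user stack, restoring the saved register
 * state, FPU and register-window state, and the caller's signal mask.
 */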
asmlinkage void do_sigreturn(struct pt_regs *regs)
{
        unsigned long up_psr, pc, npc, ufp;
        struct signal_frame __user *sf;
        sigset_t set;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        synchronize_user_stack();

        sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

        /* 1. Make sure we are not getting garbage from the user */
        if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv_and_exit;

        if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
                goto segv_and_exit;

        if (ufp & 0x7)
                goto segv_and_exit;

        err = __get_user(pc, &sf->info.si_regs.pc);
        err |= __get_user(npc, &sf->info.si_regs.npc);

        if ((pc | npc) & 3)
                goto segv_and_exit;

        /* 2. Restore the state */
        up_psr = regs->psr;
        err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs));

        /* User can only change condition codes and FPU enabling in %psr. */
        regs->psr = (up_psr & ~(PSR_ICC | PSR_EF))
                  | (regs->psr & (PSR_ICC | PSR_EF));

        /* Prevent syscall restart. */
        pt_regs_clear_syscall(regs);

        err |= __get_user(fpu_save, &sf->fpu_save);
        if (fpu_save)
                err |= restore_fpu_state(regs, fpu_save);
        err |= __get_user(rwin_save, &sf->rwin_save);
        if (rwin_save)
                err |= restore_rwin_state(rwin_save);

        /* This is pretty much atomic, no amount of locking would prevent
         * the races which exist anyways.
         */
        err |= __get_user(set.sig[0], &sf->info.si_mask);
        err |= __copy_from_user(&set.sig[1], &sf->extramask,
                                (_NSIG_WORDS-1) * sizeof(unsigned int));

        if (err)
                goto segv_and_exit;

        set_current_blocked(&set);
        return;

segv_and_exit:
        force_sig(SIGSEGV);
}

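/* Handler for the rt_sigreturn system call: the rt_signal_frame counterpart
 * of do_sigreturn(), which additionally restores the full sigset_t and the
 * alternate signal stack settings.
 */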
asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_signal_frame __user *sf;
        unsigned int psr, pc, npc, ufp;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        sigset_t set;
        int err;

        synchronize_user_stack();
        sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
        if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv;

        if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
                goto segv;

        if (ufp & 0x7)
                goto segv;

        err = __get_user(pc, &sf->regs.pc);
        err |= __get_user(npc, &sf->regs.npc);
        err |= ((pc | npc) & 0x03);

        err |= __get_user(regs->y, &sf->regs.y);
        err |= __get_user(psr, &sf->regs.psr);

        err |= __copy_from_user(&regs->u_regs[UREG_G1],
                                &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32));

        regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC);

        /* Prevent syscall restart. */
        pt_regs_clear_syscall(regs);

        err |= __get_user(fpu_save, &sf->fpu_save);
        if (!err && fpu_save)
                err |= restore_fpu_state(regs, fpu_save);
        err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
        err |= restore_altstack(&sf->stack);

        if (err)
                goto segv;

        regs->pc = pc;
        regs->npc = npc;

        err |= __get_user(rwin_save, &sf->rwin_save);
        if (!err && rwin_save) {
                if (restore_rwin_state(rwin_save))
                        goto segv;
        }

        set_current_blocked(&set);
        return;
segv:
        force_sig(SIGSEGV);
}

static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
        unsigned long sp = regs->u_regs[UREG_FP];

        /*
         * If we are on the alternate signal stack and would overflow it, don't.
         * Return an always-bogus address instead so we will die with SIGSEGV.
         */
        if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
                return (void __user *) -1L;

        /* This is the X/Open sanctioned signal stack switching. */
        sp = sigsp(sp, ksig) - framesize;

        /* Always align the stack frame. This handles two cases. First,
         * sigaltstack need not be mindful of platform specific stack
         * alignment. Second, if we took this signal because the stack
         * is not aligned properly, we'd like to take the signal cleanly
         * and report that.
         */
        sp &= ~15UL;

        return (void __user *) sp;
}

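/* Build a (non-RT) signal frame on the user stack and redirect the saved
 * register state so that execution resumes in the user's handler.
 */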
static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
                       sigset_t *oldset)
{
        struct signal_frame __user *sf;
        int sigframe_size, err, wsaved;
        void __user *tail;

        /* 1. Make sure everything is clean */
        synchronize_user_stack();

        wsaved = current_thread_info()->w_saved;

        sigframe_size = sizeof(*sf);
        if (used_math())
                sigframe_size += sizeof(__siginfo_fpu_t);
        if (wsaved)
                sigframe_size += sizeof(__siginfo_rwin_t);

        sf = (struct signal_frame __user *)
                get_sigframe(ksig, regs, sigframe_size);

        if (invalid_frame_pointer(sf, sigframe_size)) {
                do_exit(SIGILL);
                return -EINVAL;
        }

        tail = sf + 1;

        /* 2. Save the current process state */
        err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs));

        err |= __put_user(0, &sf->extra_size);

        if (used_math()) {
                __siginfo_fpu_t __user *fp = tail;
                tail += sizeof(*fp);
                err |= save_fpu_state(regs, fp);
                err |= __put_user(fp, &sf->fpu_save);
        } else {
                err |= __put_user(0, &sf->fpu_save);
        }
        if (wsaved) {
                __siginfo_rwin_t __user *rwp = tail;
                tail += sizeof(*rwp);
                err |= save_rwin_state(wsaved, rwp);
                err |= __put_user(rwp, &sf->rwin_save);
        } else {
                err |= __put_user(0, &sf->rwin_save);
        }

        err |= __put_user(oldset->sig[0], &sf->info.si_mask);
        err |= __copy_to_user(sf->extramask, &oldset->sig[1],
                              (_NSIG_WORDS - 1) * sizeof(unsigned int));
        if (!wsaved) {
                err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
                                      sizeof(struct reg_window32));
        } else {
                struct reg_window32 *rp;

                rp = &current_thread_info()->reg_window[wsaved - 1];
                err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
        }
        if (err)
                return err;

        /* 3. signal handler back-trampoline and parameters */
        regs->u_regs[UREG_FP] = (unsigned long) sf;
        regs->u_regs[UREG_I0] = ksig->sig;
        regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
        regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

        /* 4. signal handler */
        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
        regs->npc = (regs->pc + 4);

        /* 5. return to kernel instructions */
        if (ksig->ka.ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
        else {
                regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

                /* mov __NR_sigreturn, %g1 */
                err |= __put_user(0x821020d8, &sf->insns[0]);

                /* t 0x10 */
                err |= __put_user(0x91d02010, &sf->insns[1]);
                if (err)
                        return err;

                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
        return 0;
}

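/* Build an rt signal frame, which additionally carries the siginfo_t, the
 * full register set, and the alternate-stack state, then redirect execution
 * to the user's handler.
 */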
static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
                          sigset_t *oldset)
{
        struct rt_signal_frame __user *sf;
        int sigframe_size, wsaved;
        void __user *tail;
        unsigned int psr;
        int err;

        synchronize_user_stack();
        wsaved = current_thread_info()->w_saved;
        sigframe_size = sizeof(*sf);
        if (used_math())
                sigframe_size += sizeof(__siginfo_fpu_t);
        if (wsaved)
                sigframe_size += sizeof(__siginfo_rwin_t);
        sf = (struct rt_signal_frame __user *)
                get_sigframe(ksig, regs, sigframe_size);
        if (invalid_frame_pointer(sf, sigframe_size)) {
                do_exit(SIGILL);
                return -EINVAL;
        }

        tail = sf + 1;
        err = __put_user(regs->pc, &sf->regs.pc);
        err |= __put_user(regs->npc, &sf->regs.npc);
        err |= __put_user(regs->y, &sf->regs.y);
        psr = regs->psr;
        if (used_math())
                psr |= PSR_EF;
        err |= __put_user(psr, &sf->regs.psr);
        err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs));
        err |= __put_user(0, &sf->extra_size);

        if (psr & PSR_EF) {
                __siginfo_fpu_t __user *fp = tail;
                tail += sizeof(*fp);
                err |= save_fpu_state(regs, fp);
                err |= __put_user(fp, &sf->fpu_save);
        } else {
                err |= __put_user(0, &sf->fpu_save);
        }
        if (wsaved) {
                __siginfo_rwin_t __user *rwp = tail;
                tail += sizeof(*rwp);
                err |= save_rwin_state(wsaved, rwp);
                err |= __put_user(rwp, &sf->rwin_save);
        } else {
                err |= __put_user(0, &sf->rwin_save);
        }
        err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t));

        /* Setup sigaltstack */
        err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]);

        if (!wsaved) {
                err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP],
                                      sizeof(struct reg_window32));
        } else {
                struct reg_window32 *rp;

                rp = &current_thread_info()->reg_window[wsaved - 1];
                err |= __copy_to_user(sf, rp, sizeof(struct reg_window32));
        }

        err |= copy_siginfo_to_user(&sf->info, &ksig->info);

        if (err)
                return err;

        regs->u_regs[UREG_FP] = (unsigned long) sf;
        regs->u_regs[UREG_I0] = ksig->sig;
        regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
        regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
        regs->npc = (regs->pc + 4);

        if (ksig->ka.ka_restorer)
                regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
        else {
                regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2);

                /* mov __NR_rt_sigreturn, %g1 */
                err |= __put_user(0x82102065, &sf->insns[0]);

                /* t 0x10 */
                err |= __put_user(0x91d02010, &sf->insns[1]);
                if (err)
                        return err;

                /* Flush instruction space. */
                flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        }
        return 0;
}

static inline void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int err;

        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                err = setup_rt_frame(ksig, regs, oldset);
        else
                err = setup_frame(ksig, regs, oldset);
        signal_setup_done(err, ksig, 0);
}

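/* Adjust the register state of an interrupted system call so that it is
 * either restarted or fails with EINTR, depending on the error code and
 * the handler's SA_RESTART flag.
 */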
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
                                   struct sigaction *sa)
{
        switch(regs->u_regs[UREG_I0]) {
        case ERESTART_RESTARTBLOCK:
        case ERESTARTNOHAND:
        no_system_call_restart:
                regs->u_regs[UREG_I0] = EINTR;
                regs->psr |= PSR_C;
                break;
        case ERESTARTSYS:
                if (!(sa->sa_flags & SA_RESTART))
                        goto no_system_call_restart;
                fallthrough;
        case ERESTARTNOINTR:
                regs->u_regs[UREG_I0] = orig_i0;
                regs->pc -= 4;
                regs->npc -= 4;
        }
}

/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL, even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
        struct ksignal ksig;
        int restart_syscall;
        bool has_handler;

        /* It's a lot of work and synchronization to add a new ptrace
         * register for GDB to save and restore in order to get
         * orig_i0 correct for syscall restarts when debugging.
         *
         * Although it should be the case that most of the global
         * registers are volatile across a system call, glibc already
         * depends upon the fact that we preserve them. So we can't
         * just use any global register to save away the orig_i0 value.
         *
         * In particular %g2, %g3, %g4, and %g5 are all assumed to be
         * preserved across a system call trap by various pieces of
         * code in glibc.
         *
         * %g7 is used as the "thread register". %g6 is not used in
         * any fixed manner. %g6 is used as a scratch register and
         * a compiler temporary, but its value is never used across
         * a system call. Therefore %g6 is usable for orig_i0 storage.
         */
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
                regs->u_regs[UREG_G6] = orig_i0;

        has_handler = get_signal(&ksig);

        /* If the debugger messes with the program counter, it clears
         * the software "in syscall" bit, directing us to not perform
         * a syscall restart.
         */
        restart_syscall = 0;
        if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
                restart_syscall = 1;
                orig_i0 = regs->u_regs[UREG_G6];
        }

        if (has_handler) {
                if (restart_syscall)
                        syscall_restart(orig_i0, regs, &ksig.ka.sa);
                handle_signal(&ksig, regs);
        } else {
                if (restart_syscall) {
                        switch (regs->u_regs[UREG_I0]) {
                        case ERESTARTNOHAND:
                        case ERESTARTSYS:
                        case ERESTARTNOINTR:
                                /* replay the system call when we are done */
                                regs->u_regs[UREG_I0] = orig_i0;
                                regs->pc -= 4;
                                regs->npc -= 4;
                                pt_regs_clear_syscall(regs);
                                fallthrough;
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->pc -= 4;
                                regs->npc -= 4;
                                pt_regs_clear_syscall(regs);
                        }
                }
                restore_saved_sigmask();
        }
}

void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
                      unsigned long thread_info_flags)
{
        if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs, orig_i0);
        if (thread_info_flags & _TIF_NOTIFY_RESUME)
                tracehook_notify_resume(regs);
}

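/* Implementation of the legacy sigstack() call, expressed in terms of the
 * sas_ss_sp/sas_ss_size state that sigaltstack() uses.
 */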
asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
                               struct sigstack __user *ossptr,
                               unsigned long sp)
{
        int ret = -EFAULT;

        /* First see if old state is wanted. */
        if (ossptr) {
                if (put_user(current->sas_ss_sp + current->sas_ss_size,
                             &ossptr->the_stack) ||
                    __put_user(on_sig_stack(sp), &ossptr->cur_status))
                        goto out;
        }

        /* Now see if we want to update the new state. */
        if (ssptr) {
                char *ss_sp;

                if (get_user(ss_sp, &ssptr->the_stack))
                        goto out;
                /* If the current stack was set with sigaltstack, don't
                   swap stacks while we are on it. */
                ret = -EPERM;
                if (current->sas_ss_sp && on_sig_stack(sp))
                        goto out;

                /* Since we don't know the extent of the stack, and we don't
                   track onstack-ness, but rather calculate it, we must
                   presume a size. Ho hum this interface is lossy. */
                current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
                current->sas_ss_size = SIGSTKSZ;
        }
        ret = 0;
out:
        return ret;
}