^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/kernel/ptrace.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * (C) Copyright 1999 Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Common interfaces for "ptrace()" which we do not want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * to continually duplicate across every architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/sched/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched/coredump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/sched/task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/audit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/pid_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/syscalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/regset.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/hw_breakpoint.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/cn_proc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/syscall.h> /* for syscall_get_* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * Access another process' address space via ptrace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * Source/target buffer must be kernel space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * Do not walk the page table directly, use get_user_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) void *buf, int len, unsigned int gup_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) mm = get_task_mm(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) if (!tsk->ptrace ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) (current != tsk->parent) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) ((get_dumpable(mm) != SUID_DUMP_USER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) !ptracer_capable(tsk, mm->user_ns))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) mmput(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) mmput(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
/**
 * __ptrace_link - link @child onto @new_parent's ptraced list
 * @child: task being attached
 * @new_parent: tracer that becomes @child's ->parent
 * @ptracer_cred: tracer credentials to record for later access checks
 *
 * Must be called with tasklist_lock write-held (see ptrace_link()).
 * Takes a reference on @ptracer_cred; it is dropped in __ptrace_unlink().
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	/* A task can sit on at most one tracer's ->ptraced list. */
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	/* Snapshot the tracer's current creds for later __ptrace_may_access()-style checks. */
	__ptrace_link(child, new_parent, current_cred());
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * __ptrace_unlink - unlink ptracee and restore its execution state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * @child: ptracee to be unlinked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * Remove @child from the ptrace list, move it back to the original parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) * and restore the execution state so that it conforms to the group stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * If the ptracer is exiting, the ptracee can be in any state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * After detach, the ptracee should be in a state which conforms to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * group stop. If the group is stopped or in the process of stopping, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * up from TASK_TRACED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * to but in the opposite direction of what happens while attaching to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * stopped task. However, in this direction, the intermediate RUNNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * state is not hidden even from the current ptracer and if it immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * re-attaches and performs a WNOHANG wait(2), it may fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * write_lock_irq(tasklist_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	/* Stop generating syscall entry/exit (and emulation) traps. */
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	/* Reparent back to the real parent and leave the tracer's list. */
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	/* Drop the cred reference taken in __ptrace_link(). */
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static bool looks_like_a_spurious_pid(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) if (task_pid_vnr(task) == task->ptrace_message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * The tracee changed its pid but the PTRACE_EVENT_EXEC event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * was not wait()'ed, most probably debugger targets the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * leader which was destroyed in de_thread().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		/*
		 * __TASK_TRACED drops TASK_WAKEKILL, so even SIGKILL
		 * cannot wake the tracee until ptrace_unfreeze_traced()
		 * restores TASK_TRACED.
		 */
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
/*
 * Undo ptrace_freeze_traced(): move the task back from __TASK_TRACED to
 * the killable TASK_TRACED, or wake it up if a fatal signal arrived
 * while it was frozen.
 */
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	/* Nothing to do unless ptrace_freeze_traced() actually froze it. */
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/* A frozen tracee would mean a leaked freeze from a prior op. */
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		/* Wait until the frozen tracee is off-CPU before touching it. */
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) if (mode & PTRACE_MODE_NOAUDIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return ns_capable(ns, CAP_SYS_PTRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
/*
 * Returns 0 on success, -errno on denial.
 * Callers hold task_lock(task) around this check (see ptrace_may_access()).
 */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	/* Exactly one of FSCREDS/REALCREDS must be specified by the caller. */
	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	/* Caller must match all of the target's real/effective/saved ids. */
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	/* Finally let the LSMs have their say. */
	return security_ptrace_access_check(task, mode);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) bool ptrace_may_access(struct task_struct *task, unsigned int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) task_lock(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) err = __ptrace_may_access(task, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) task_unlock(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return !err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) static int check_ptrace_options(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (data & ~(unsigned long)PTRACE_O_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) !IS_ENABLED(CONFIG_SECCOMP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) current->ptrace & PT_SUSPEND_SECCOMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) static int ptrace_attach(struct task_struct *task, long request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) bool seize = (request == PTRACE_SEIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) retval = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (seize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (addr != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * This duplicates the check in check_ptrace_options() because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * ptrace_attach() and ptrace_setoptions() have historically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * used different error codes for unknown ptrace options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (flags & ~(unsigned long)PTRACE_O_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) retval = check_ptrace_options(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) flags = PT_PTRACED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) audit_ptrace(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) retval = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (unlikely(task->flags & PF_KTHREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) if (same_thread_group(task, current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * Protect exec's credential calculations against our interference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * SUID, SGID and LSM creds get determined differently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * under ptrace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) retval = -ERESTARTNOINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) task_lock(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) task_unlock(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) goto unlock_creds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) retval = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (unlikely(task->exit_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) goto unlock_tasklist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (task->ptrace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) goto unlock_tasklist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (seize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) flags |= PT_SEIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) task->ptrace = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) ptrace_link(task, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /* SEIZE doesn't trap tracee on attach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (!seize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) spin_lock(&task->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * will be cleared if the child completes the transition or any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * event which clears the group stop states happens. We'll wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * for the transition to complete before returning from this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * This hides STOPPED -> RUNNING -> TRACED transition from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) * attaching thread but a different thread in the same group can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) * still observe the transient RUNNING state. IOW, if another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) * thread's WNOHANG wait(2) on the stopped tracee races against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) * ATTACH, the wait(2) may fail due to the transient RUNNING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * The following task_is_stopped() test is safe as both transitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * in and out of STOPPED are protected by siglock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (task_is_stopped(task) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) signal_wake_up_state(task, __TASK_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) spin_unlock(&task->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) unlock_tasklist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) unlock_creds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) mutex_unlock(&task->signal->cred_guard_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (!retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) * We do not bother to change retval or clear JOBCTL_TRAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) * if wait_on_bit() was interrupted by SIGKILL. The tracer will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * not return to user-mode, it will exit and clear this bit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) * __ptrace_unlink() if it wasn't already cleared by the tracee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * and until then nobody can ptrace this task.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) proc_ptrace_connector(task, PTRACE_ATTACH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * ptrace_traceme -- helper for PTRACE_TRACEME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * Performs checks and sets PT_PTRACED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * Should be used by all ptrace implementations for PTRACE_TRACEME.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) static int ptrace_traceme(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) int ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Are we already being traced? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (!current->ptrace) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) ret = security_ptrace_traceme(current->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * Check PF_EXITING to ensure ->real_parent has not passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * exit_ptrace(). Otherwise we don't report the error but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * pretend ->real_parent untraces us right after return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (!ret && !(current->real_parent->flags & PF_EXITING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) current->ptrace = PT_PTRACED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) ptrace_link(current, current->real_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) * Called with irqs disabled, returns true if childs should reap themselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) static int ignoring_children(struct sighand_struct *sigh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) spin_lock(&sigh->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) spin_unlock(&sigh->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * Called with tasklist_lock held for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * Unlink a traced task, and clean it up if it was a traced zombie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) * Return true if it needs to be reaped with release_task().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) * (We can't call release_task() here because we already hold tasklist_lock.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * If it's a zombie, our attachedness prevented normal parent notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * or self-reaping. Do notification now if it would have happened earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * If it should reap itself, return true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * If it's our own child, there is no notification to do. But if our normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) * children self-reap, then this child was prevented by ptrace and we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * reap it now, in that case we must also wake up sub-threads sleeping in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * do_wait().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) bool dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) __ptrace_unlink(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (p->exit_state != EXIT_ZOMBIE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) dead = !thread_group_leader(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (!dead && thread_group_empty(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (!same_thread_group(p->real_parent, tracer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) dead = do_notify_parent(p, p->exit_signal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) else if (ignoring_children(tracer->sighand)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) __wake_up_parent(p, tracer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) dead = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) /* Mark it as in the process of being reaped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) p->exit_state = EXIT_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return dead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static int ptrace_detach(struct task_struct *child, unsigned int data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (!valid_signal(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /* Architecture-specific hardware disable .. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) ptrace_disable(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) write_lock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) * We rely on ptrace_freeze_traced(). It can't be killed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) * untraced by another thread, it can't be a zombie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) WARN_ON(!child->ptrace || child->exit_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * tasklist_lock avoids the race with wait_task_stopped(), see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) * the comment in ptrace_resume().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) child->exit_code = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) __ptrace_detach(current, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) write_unlock_irq(&tasklist_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) proc_ptrace_connector(child, PTRACE_DETACH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * Detach all tasks we were using ptrace on. Called with tasklist held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * for writing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) struct task_struct *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (unlikely(p->ptrace & PT_EXITKILL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) send_sig_info(SIGKILL, SEND_SIG_PRIV, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (__ptrace_detach(tracer, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) list_add(&p->ptrace_entry, dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) int copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) char buf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) int this_len, retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (!retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) if (copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (copy_to_user(dst, buf, retval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) copied += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) src += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) dst += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) len -= retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) int copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) char buf[128];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) int this_len, retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (copy_from_user(buf, src, this_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) retval = ptrace_access_vm(tsk, dst, buf, this_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) FOLL_FORCE | FOLL_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (!retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) copied += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) src += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) dst += retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) len -= retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) static int ptrace_setoptions(struct task_struct *child, unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) unsigned flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) ret = check_ptrace_options(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* Avoid intermediate state when all opts are cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) flags = child->ptrace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) flags |= (data << PT_OPT_FLAG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) child->ptrace = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) int error = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (lock_task_sighand(child, &flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (likely(child->last_siginfo != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) copy_siginfo(info, child->last_siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) unlock_task_sighand(child, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int error = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (lock_task_sighand(child, &flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (likely(child->last_siginfo != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) copy_siginfo(child->last_siginfo, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) unlock_task_sighand(child, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static int ptrace_peek_siginfo(struct task_struct *child,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct ptrace_peeksiginfo_args arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct sigpending *pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) struct sigqueue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ret = copy_from_user(&arg, (void __user *) addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) sizeof(struct ptrace_peeksiginfo_args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return -EINVAL; /* unknown flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (arg.nr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) /* Ensure arg.off fits in an unsigned long */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (arg.off > ULONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) pending = &child->signal->shared_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) pending = &child->pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) for (i = 0; i < arg.nr; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) kernel_siginfo_t info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned long off = arg.off + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) spin_lock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) list_for_each_entry(q, &pending->list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!off--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) copy_siginfo(&info, &q->info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_unlock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (!found) /* beyond the end of the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (unlikely(in_compat_syscall())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) compat_siginfo_t __user *uinfo = compat_ptr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (copy_siginfo_to_user32(uinfo, &info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) siginfo_t __user *uinfo = (siginfo_t __user *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (copy_siginfo_to_user(uinfo, &info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) data += sizeof(siginfo_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) #ifdef PTRACE_SINGLESTEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) #define is_singlestep(request) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) #ifdef PTRACE_SINGLEBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) #define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) #define is_singleblock(request) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #ifdef PTRACE_SYSEMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) #define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) #define is_sysemu_singlestep(request) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static int ptrace_resume(struct task_struct *child, long request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) bool need_siglock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (!valid_signal(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (request == PTRACE_SYSCALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #ifdef TIF_SYSCALL_EMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (is_singleblock(request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (unlikely(!arch_has_block_step()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) user_enable_block_step(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (unlikely(!arch_has_single_step()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) user_enable_single_step(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) user_disable_single_step(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Change ->exit_code and ->state under siglock to avoid the race
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * with wait_task_stopped() in between; a non-zero ->exit_code will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * wrongly look like another report from tracee.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * Note that we need siglock even if ->exit_code == data and/or this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * status was not reported yet, the new status must not be cleared by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * wait_task_stopped() after resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * If data == 0 we do not care if wait_task_stopped() reports the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * status and clears the code too; this can't race with the tracee, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * takes siglock after resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) need_siglock = data && !thread_group_empty(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (need_siglock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) spin_lock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) child->exit_code = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) wake_up_state(child, __TASK_TRACED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (need_siglock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_unlock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static const struct user_regset *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) find_regset(const struct user_regset_view *view, unsigned int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) const struct user_regset *regset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) for (n = 0; n < view->n; ++n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) regset = view->regsets + n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (regset->core_note_type == type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return regset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct iovec *kiov)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) const struct user_regset_view *view = task_user_regset_view(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) const struct user_regset *regset = find_regset(view, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int regset_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!regset || (kiov->iov_len % regset->size) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) regset_no = regset - view->regsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) kiov->iov_len = min(kiov->iov_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) (__kernel_size_t) (regset->n * regset->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (req == PTRACE_GETREGSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return copy_regset_to_user(task, view, regset_no, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) kiov->iov_len, kiov->iov_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return copy_regset_from_user(task, view, regset_no, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) kiov->iov_len, kiov->iov_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * This is declared in linux/regset.h and defined in machine-dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * code. We put the export here, near the primary machine-neutral use,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * to ensure no machine forgets it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) EXPORT_SYMBOL_GPL(task_user_regset_view);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct ptrace_syscall_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned long args[ARRAY_SIZE(info->entry.args)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) info->op = PTRACE_SYSCALL_INFO_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) info->entry.nr = syscall_get_nr(child, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) syscall_get_arguments(child, regs, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) for (i = 0; i < ARRAY_SIZE(args); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) info->entry.args[i] = args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* args is the last field in struct ptrace_syscall_info.entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return offsetofend(struct ptrace_syscall_info, entry.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct ptrace_syscall_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * As struct ptrace_syscall_info.entry is currently a subset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * of struct ptrace_syscall_info.seccomp, it makes sense to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * initialize that subset using ptrace_get_syscall_info_entry().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * This can be reconsidered in the future if these structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * diverge significantly enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ptrace_get_syscall_info_entry(child, regs, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) info->op = PTRACE_SYSCALL_INFO_SECCOMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) info->seccomp.ret_data = child->ptrace_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* ret_data is the last field in struct ptrace_syscall_info.seccomp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct ptrace_syscall_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) info->op = PTRACE_SYSCALL_INFO_EXIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) info->exit.rval = syscall_get_error(child, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) info->exit.is_error = !!info->exit.rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!info->exit.is_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) info->exit.rval = syscall_get_return_value(child, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* is_error is the last field in struct ptrace_syscall_info.exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) return offsetofend(struct ptrace_syscall_info, exit.is_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) void __user *datavp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct pt_regs *regs = task_pt_regs(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct ptrace_syscall_info info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) .op = PTRACE_SYSCALL_INFO_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) .arch = syscall_get_arch(child),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) .instruction_pointer = instruction_pointer(regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) .stack_pointer = user_stack_pointer(regs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) unsigned long write_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * This does not need lock_task_sighand() to access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * child->last_siginfo because ptrace_freeze_traced()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * called earlier by ptrace_check_attach() ensures that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * the tracee cannot go away and clear its last_siginfo.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case SIGTRAP | 0x80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) switch (child->ptrace_message) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) case PTRACE_EVENTMSG_SYSCALL_ENTRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) actual_size = ptrace_get_syscall_info_entry(child, regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) case PTRACE_EVENTMSG_SYSCALL_EXIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) actual_size = ptrace_get_syscall_info_exit(child, regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) actual_size = ptrace_get_syscall_info_seccomp(child, regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) write_size = min(actual_size, user_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) int ptrace_request(struct task_struct *child, long request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) unsigned long addr, unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) bool seized = child->ptrace & PT_SEIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) kernel_siginfo_t siginfo, *si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) void __user *datavp = (void __user *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) unsigned long __user *datalp = datavp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) switch (request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) case PTRACE_PEEKTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) case PTRACE_PEEKDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return generic_ptrace_peekdata(child, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) case PTRACE_POKETEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case PTRACE_POKEDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return generic_ptrace_pokedata(child, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #ifdef PTRACE_OLDSETOPTIONS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) case PTRACE_OLDSETOPTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case PTRACE_SETOPTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ret = ptrace_setoptions(child, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case PTRACE_GETEVENTMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ret = put_user(child->ptrace_message, datalp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) case PTRACE_PEEKSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ret = ptrace_peek_siginfo(child, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) case PTRACE_GETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ret = ptrace_getsiginfo(child, &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ret = copy_siginfo_to_user(datavp, &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) case PTRACE_SETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ret = copy_siginfo_from_user(&siginfo, datavp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ret = ptrace_setsiginfo(child, &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) case PTRACE_GETSIGMASK: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) sigset_t *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (addr != sizeof(sigset_t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (test_tsk_restore_sigmask(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) mask = &child->saved_sigmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mask = &child->blocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (copy_to_user(datavp, mask, sizeof(sigset_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) case PTRACE_SETSIGMASK: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) sigset_t new_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (addr != sizeof(sigset_t)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * Every thread does recalc_sigpending() after resume, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * retarget_shared_pending() and recalc_sigpending() are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * called here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) spin_lock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) child->blocked = new_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) spin_unlock_irq(&child->sighand->siglock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) clear_tsk_restore_sigmask(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) case PTRACE_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * Stop tracee without any side-effect on signal or job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * control. At least one trap is guaranteed to happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * after this request. If @child is already trapped, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * current trap is not disturbed and another trap will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * happen after the current trap is ended with PTRACE_CONT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * The actual trap might not be PTRACE_EVENT_STOP trap but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * the pending condition is cleared regardless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (unlikely(!seized || !lock_task_sighand(child, &flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * INTERRUPT doesn't disturb existing trap sans one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * exception. If ptracer issued LISTEN for the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * STOP, this INTERRUPT should clear LISTEN and re-trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * tracee into STOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) unlock_task_sighand(child, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) case PTRACE_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Listen for events. Tracee must be in STOP. It's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * resumed per-se but is not considered to be in TRACED by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * wait(2) or ptrace(2). If an async event (e.g. group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * stop state change) happens, tracee will enter STOP trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * again. Alternatively, ptracer can issue INTERRUPT to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * finish listening and re-trap tracee into STOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (unlikely(!seized || !lock_task_sighand(child, &flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) si = child->last_siginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) child->jobctl |= JOBCTL_LISTENING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * If NOTIFY is set, it means event happened between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * start of this trap and now. Trigger re-trap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (child->jobctl & JOBCTL_TRAP_NOTIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ptrace_signal_wake_up(child, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) unlock_task_sighand(child, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) case PTRACE_DETACH: /* detach a process that was attached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ret = ptrace_detach(child, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) #ifdef CONFIG_BINFMT_ELF_FDPIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) case PTRACE_GETFDPIC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct mm_struct *mm = get_task_mm(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) unsigned long tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ret = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) switch (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) case PTRACE_GETFDPIC_EXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) tmp = mm->context.exec_fdpic_loadmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) case PTRACE_GETFDPIC_INTERP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) tmp = mm->context.interp_fdpic_loadmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) mmput(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ret = put_user(tmp, datalp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) #ifdef PTRACE_SINGLESTEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) case PTRACE_SINGLESTEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) #ifdef PTRACE_SINGLEBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) case PTRACE_SINGLEBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) #ifdef PTRACE_SYSEMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) case PTRACE_SYSEMU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) case PTRACE_SYSEMU_SINGLESTEP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) case PTRACE_SYSCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) case PTRACE_CONT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ptrace_resume(child, request, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case PTRACE_KILL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (child->exit_state) /* already dead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return ptrace_resume(child, request, SIGKILL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case PTRACE_GETREGSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) case PTRACE_SETREGSET: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct iovec kiov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct iovec __user *uiov = datavp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!access_ok(uiov, sizeof(*uiov)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (__get_user(kiov.iov_base, &uiov->iov_base) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) __get_user(kiov.iov_len, &uiov->iov_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ret = ptrace_regset(child, request, addr, &kiov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ret = __put_user(kiov.iov_len, &uiov->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) case PTRACE_GET_SYSCALL_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = ptrace_get_syscall_info(child, addr, datavp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case PTRACE_SECCOMP_GET_FILTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ret = seccomp_get_filter(child, addr, datavp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) case PTRACE_SECCOMP_GET_METADATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ret = seccomp_get_metadata(child, addr, datavp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) #ifndef arch_ptrace_attach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) #define arch_ptrace_attach(child) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) unsigned long, data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct task_struct *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (request == PTRACE_TRACEME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) ret = ptrace_traceme();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) arch_ptrace_attach(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) child = find_get_task_by_vpid(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (!child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ret = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) ret = ptrace_attach(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * Some architectures need to do book-keeping after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * a ptrace attach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) arch_ptrace_attach(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto out_put_task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ret = ptrace_check_attach(child, request == PTRACE_KILL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) request == PTRACE_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) goto out_put_task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ret = arch_ptrace(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (ret || request != PTRACE_DETACH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ptrace_unfreeze_traced(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) out_put_task_struct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) put_task_struct(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) int copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (copied != sizeof(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return put_user(tmp, (unsigned long __user *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) int copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) FOLL_FORCE | FOLL_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return (copied == sizeof(data)) ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #if defined CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int compat_ptrace_request(struct task_struct *child, compat_long_t request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) compat_ulong_t addr, compat_ulong_t data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) compat_ulong_t __user *datap = compat_ptr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) compat_ulong_t word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) kernel_siginfo_t siginfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) switch (request) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case PTRACE_PEEKTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case PTRACE_PEEKDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ret = ptrace_access_vm(child, addr, &word, sizeof(word),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) FOLL_FORCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (ret != sizeof(word))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ret = put_user(word, datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) case PTRACE_POKETEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case PTRACE_POKEDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ret = ptrace_access_vm(child, addr, &data, sizeof(data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) FOLL_FORCE | FOLL_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) ret = (ret != sizeof(data) ? -EIO : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case PTRACE_GETEVENTMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ret = put_user((compat_ulong_t) child->ptrace_message, datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case PTRACE_GETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) ret = ptrace_getsiginfo(child, &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) ret = copy_siginfo_to_user32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) (struct compat_siginfo __user *) datap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) case PTRACE_SETSIGINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ret = copy_siginfo_from_user32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) &siginfo, (struct compat_siginfo __user *) datap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ret = ptrace_setsiginfo(child, &siginfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) case PTRACE_GETREGSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) case PTRACE_SETREGSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct iovec kiov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct compat_iovec __user *uiov =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) (struct compat_iovec __user *) datap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) compat_uptr_t ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) compat_size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (!access_ok(uiov, sizeof(*uiov)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (__get_user(ptr, &uiov->iov_base) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) __get_user(len, &uiov->iov_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) kiov.iov_base = compat_ptr(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) kiov.iov_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret = ptrace_regset(child, request, addr, &kiov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) ret = __put_user(kiov.iov_len, &uiov->iov_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ret = ptrace_request(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) compat_long_t, addr, compat_long_t, data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct task_struct *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (request == PTRACE_TRACEME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ret = ptrace_traceme();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) child = find_get_task_by_vpid(pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (!child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ret = -ESRCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ret = ptrace_attach(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * Some architectures need to do book-keeping after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * a ptrace attach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) arch_ptrace_attach(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) goto out_put_task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) ret = ptrace_check_attach(child, request == PTRACE_KILL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) request == PTRACE_INTERRUPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = compat_arch_ptrace(child, request, addr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (ret || request != PTRACE_DETACH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) ptrace_unfreeze_traced(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) out_put_task_struct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) put_task_struct(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) #endif /* CONFIG_COMPAT */