// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * Does not yet catch signals sent when the child dies;
 * those are handled in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction. The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 * ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 * Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
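/*
 * BREAKINST_ARM 0xe7f001f0 has cond == 1110 (AL), bits [27:20] == 0111 1111
 * and bits [7:4] == 1111, so it lies in the ARM space above; BREAKINST_THUMB
 * 0xde01 likewise matches the Thumb pattern 1101 1110 xxxx xxxx.
 */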
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
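 *
 * For example, regs_query_register_offset("sp") returns
 * offsetof(struct pt_regs, ARM_sp).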
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset: the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * This routine gets a word from the process's saved register state,
 * i.e. the pt_regs stored at the top of its kernel stack. The offset
 * is the index of the register within pt_regs.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * This routine puts a word into the process's saved register state,
 * i.e. the pt_regs stored at the top of its kernel stack. The offset
 * is the index of the register within pt_regs; the write is rejected
 * if the resulting register set would not be valid for user mode.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs));
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = 0x07f001f0,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = 0,
	.fn = break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = 0x0000de01,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = 0xf7f0a000,
	.cpsr_mask = PSR_T_BIT,
	.cpsr_val = PSR_T_BIT,
	.fn = break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);

/*
 * Read the word at offset "off" in the "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
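/*
 * Offsets PT_TEXT_ADDR, PT_DATA_ADDR and PT_TEXT_END_ADDR report the traced
 * task's mm layout; offsets past pt_regs but still inside struct user simply
 * read back as zero.
 */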
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user". We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread); /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread); /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread); /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread); /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
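/*
 * For example, with ARM_MAX_BRP breakpoint slots:
 *	num  1 -> idx 0			(breakpoint 0 address)
 *	num  2 -> idx 0			(breakpoint 0 control)
 *	num -1 -> idx ARM_MAX_BRP	(watchpoint 0 address)
 *	num -2 -> idx ARM_MAX_BRP	(watchpoint 0 control)
 */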
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

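/*
 * Resource information is packed into a single word:
 *
 *	[31:24]	debug architecture version
 *	[23:16]	maximum watchpoint length
 *	[15: 8]	number of watchpoint slots
 *	[ 7: 0]	number of breakpoint slots
 */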
static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);
	debug_arch = arch_get_debug_arch();
	wp_len = arch_get_max_wp_len();

	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	return reg;
}

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

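/*
 * Illustrative sketch of the userspace view (not kernel code): the ptrace
 * "addr" argument selects the virtual register number described above, e.g.
 *
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);	// resource info
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)1, &addr);	// bp 0 address
 *	ptrace(PTRACE_GETHBPREGS, pid, (void *)2, &ctrl);	// bp 0 control
 */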
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len = gen_len;
		attr.bp_type = gen_type;
		attr.disabled = !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
#endif

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, &task_thread_info(target)->fpstate,
			    sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 *	16 or 32 real VFP registers (d0-d15 or d0-d31)
 *		These are transferred to/from the real registers in the task's
 *		vfp_hard_struct. The number of registers depends on the kernel
 *		configuration.
 *
 *	16 or 0 fake VFP registers (d16-d31 or empty)
 *		i.e., the user_vfp structure has space for 32 registers even if
 *		the kernel doesn't have them all.
 *
 *		vfp_get() reads this chunk as zero where applicable
 *		vfp_set() ignores this chunk
 *
 *	1 word for the FPSCR
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs));
	membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs));
	return membuf_store(&to, vfp->fpscr);
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
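
/*
 * The regsets above are what PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core
 * dumps operate on; where possible, the legacy requests handled below are
 * implemented in terms of the same regsets.
 */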

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_GPR,
					  0, sizeof(struct pt_regs),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_GPR,
					    0, sizeof(struct pt_regs),
					    datap);
		break;

	case PTRACE_GETFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_FPR,
					  0, sizeof(union fp_state),
					  datap);
		break;

	case PTRACE_SETFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_FPR,
					    0, sizeof(union fp_state),
					    datap);
		break;

#ifdef CONFIG_IWMMXT
	case PTRACE_GETWMMXREGS:
		ret = ptrace_getwmmxregs(child, datap);
		break;

	case PTRACE_SETWMMXREGS:
		ret = ptrace_setwmmxregs(child, datap);
		break;
#endif

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value[0],
			       datap);
		break;

	case PTRACE_SET_SYSCALL:
		task_thread_info(child)->syscall = data;
		ret = 0;
		break;

#ifdef CONFIG_CRUNCH
	case PTRACE_GETCRUNCHREGS:
		ret = ptrace_getcrunchregs(child, datap);
		break;

	case PTRACE_SETCRUNCHREGS:
		ret = ptrace_setcrunchregs(child, datap);
		break;
#endif

#ifdef CONFIG_VFP
	case PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_arm_view, REGSET_VFP,
					  0, ARM_VFPREGS_SIZE,
					  datap);
		break;

	case PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_arm_view, REGSET_VFP,
					    0, ARM_VFPREGS_SIZE,
					    datap);
		break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr,
					(unsigned long __user *)data);
		break;
	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr,
					(unsigned long __user *)data);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, IP = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing() == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}